mirror of
https://github.com/kubernetes-sigs/kubespray.git
synced 2026-02-02 18:18:17 -03:30
Compare commits
1254 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
02cd5418c2 | ||
|
|
c7683f33cb | ||
|
|
49e3665d96 | ||
|
|
e95ba800ea | ||
|
|
5d9bb300d7 | ||
|
|
afcd5997b9 | ||
|
|
f73717ea35 | ||
|
|
1967963702 | ||
|
|
76dd0cd777 | ||
|
|
d87b6fd9f3 | ||
|
|
a6a47dbc96 | ||
|
|
61791bbb3d | ||
|
|
298c6cb790 | ||
|
|
a561ee6207 | ||
|
|
3fa7468d54 | ||
|
|
bc3abad602 | ||
|
|
d75b5d6931 | ||
|
|
02bf742e15 | ||
|
|
d07f75b389 | ||
|
|
2d34781259 | ||
|
|
cdb63a8c49 | ||
|
|
44a0626fc8 | ||
|
|
45eac53ec7 | ||
|
|
e42203a13e | ||
|
|
4ba25326ed | ||
|
|
dca4777347 | ||
|
|
e113d1ccab | ||
|
|
112ccfa9db | ||
|
|
0ed1919a38 | ||
|
|
ff003cfa3c | ||
|
|
6c954df636 | ||
|
|
981e61fb51 | ||
|
|
5db1c3eef7 | ||
|
|
88765f62e6 | ||
|
|
0f35e17e23 | ||
|
|
77b3f9bb97 | ||
|
|
09f93d9e0c | ||
|
|
45f15bf753 | ||
|
|
913cc5a9af | ||
|
|
a46acfcdd8 | ||
|
|
0c0f6b755d | ||
|
|
ecda4e3a8c | ||
|
|
4c12b273ac | ||
|
|
b68854f79d | ||
|
|
f954bc0a5a | ||
|
|
7b8359df4d | ||
|
|
66b61866cd | ||
|
|
3736bfa04a | ||
|
|
dfc46f02d7 | ||
|
|
9086665013 | ||
|
|
0210e53bb7 | ||
|
|
ca40d51bc6 | ||
|
|
ca6a07f595 | ||
|
|
b5bd959a97 | ||
|
|
973e7372b4 | ||
|
|
b54e091886 | ||
|
|
6c220e4e4b | ||
|
|
2511e14289 | ||
|
|
0f5ea5474c | ||
|
|
6567b8e012 | ||
|
|
aee3ec682e | ||
|
|
428a554ddb | ||
|
|
32f4194cf8 | ||
|
|
6f3ff70b17 | ||
|
|
76bb5f8d75 | ||
|
|
4b98537f79 | ||
|
|
cac2196ad5 | ||
|
|
ba24fe3226 | ||
|
|
3004791c64 | ||
|
|
b1a7889ff5 | ||
|
|
92fc2df214 | ||
|
|
4f714b07b8 | ||
|
|
eb4038a6b9 | ||
|
|
4c0e9ba890 | ||
|
|
deac627dc7 | ||
|
|
6ee3c053b7 | ||
|
|
16961f69f2 | ||
|
|
b9b028a735 | ||
|
|
5fe144aa0f | ||
|
|
5b0da4279f | ||
|
|
1ac978b8fa | ||
|
|
c1a2e9a8c6 | ||
|
|
195d6d791a | ||
|
|
aa301c31d1 | ||
|
|
d9418b1dc4 | ||
|
|
2c89a02db3 | ||
|
|
0ca08e03af | ||
|
|
15efdf0c16 | ||
|
|
ab8760cc83 | ||
|
|
b6da596ec1 | ||
|
|
3c12c6beb3 | ||
|
|
26caad4f12 | ||
|
|
8ece922ef0 | ||
|
|
887a468d32 | ||
|
|
859a7f32fb | ||
|
|
1f28764ca1 | ||
|
|
76cb37d6b5 | ||
|
|
7ddd4cd38c | ||
|
|
c1eb975545 | ||
|
|
414b739641 | ||
|
|
572ab650db | ||
|
|
e296ccb4d0 | ||
|
|
72c2a8982b | ||
|
|
13c57147eb | ||
|
|
7e58b96328 | ||
|
|
ac4a71452e | ||
|
|
03bcfa7ff5 | ||
|
|
af5f376163 | ||
|
|
004b0a3fcf | ||
|
|
4bb7d2b566 | ||
|
|
94a0562c93 | ||
|
|
f619eb08b1 | ||
|
|
55195fe546 | ||
|
|
5711074c5a | ||
|
|
4a705b3fba | ||
|
|
31e386886f | ||
|
|
4d85e3765e | ||
|
|
f0a04b4d65 | ||
|
|
760ca1c3a9 | ||
|
|
23b3833806 | ||
|
|
daeeae1a91 | ||
|
|
c8f857eae4 | ||
|
|
270d21f5c1 | ||
|
|
bf29198efd | ||
|
|
db4e225342 | ||
|
|
9ebbf1c3cd | ||
|
|
ef7f5edbb3 | ||
|
|
0b5404b2b7 | ||
|
|
19e1b11d98 | ||
|
|
0df32b03ca | ||
|
|
72a4223884 | ||
|
|
03117d9572 | ||
|
|
c78f5393c3 | ||
|
|
fda49564bf | ||
|
|
ed48b6e4b7 | ||
|
|
848fc323db | ||
|
|
e6f57f27ee | ||
|
|
015ea62e92 | ||
|
|
2ca7087018 | ||
|
|
d665f14682 | ||
|
|
e375678674 | ||
|
|
076b5c153f | ||
|
|
d33a482c91 | ||
|
|
d64839e7d2 | ||
|
|
31705a502d | ||
|
|
5f5d0ffe14 | ||
|
|
4f7479d94d | ||
|
|
9511178666 | ||
|
|
b8d1652baf | ||
|
|
f7dc73b830 | ||
|
|
8eac37fabd | ||
|
|
1d0415a6cf | ||
|
|
3f5c60886b | ||
|
|
a75598b3f4 | ||
|
|
60a057cace | ||
|
|
dd9d0c0530 | ||
|
|
9fa995ac9d | ||
|
|
f07734596e | ||
|
|
caec3de364 | ||
|
|
60bfc56e8e | ||
|
|
206e24448b | ||
|
|
4175431dcd | ||
|
|
bb1eb9fec8 | ||
|
|
b0d7115e9b | ||
|
|
f8ebd08e75 | ||
|
|
6ac7840195 | ||
|
|
30e4b89837 | ||
|
|
405c711edb | ||
|
|
0e6b4e80f7 | ||
|
|
9949782e96 | ||
|
|
bbb6e7b3da | ||
|
|
bc68188209 | ||
|
|
d3780e181e | ||
|
|
2e202051e3 | ||
|
|
448c1d5faa | ||
|
|
ff2b8e5e60 | ||
|
|
8b71ef8ceb | ||
|
|
ee8f678010 | ||
|
|
6425c837d5 | ||
|
|
a6b918c1a1 | ||
|
|
c025ab4eb4 | ||
|
|
ae30009fbc | ||
|
|
158d775306 | ||
|
|
9d540165c0 | ||
|
|
0cb51e7530 | ||
|
|
13e47e73c8 | ||
|
|
6c4e5e0e3d | ||
|
|
d2fd7b7462 | ||
|
|
d9453f323b | ||
|
|
b787b76c6c | ||
|
|
a94a407a43 | ||
|
|
96e46c4209 | ||
|
|
aa30fa8009 | ||
|
|
ebfee51aca | ||
|
|
8b6a6a5a28 | ||
|
|
14ac7d797b | ||
|
|
f253691a68 | ||
|
|
038da7255f | ||
|
|
73cd24bf5a | ||
|
|
4ee9cb2be9 | ||
|
|
f1d2f84043 | ||
|
|
b9a949820a | ||
|
|
50e5f0d28b | ||
|
|
1481f7d64b | ||
|
|
7d33650019 | ||
|
|
728598b230 | ||
|
|
e40368ae2b | ||
|
|
4ff17cb5a5 | ||
|
|
b7e6dd0dd4 | ||
|
|
8ee2091955 | ||
|
|
3fac550090 | ||
|
|
d29a1db134 | ||
|
|
653d97dda4 | ||
|
|
5364160d6a | ||
|
|
1a35948ff6 | ||
|
|
40c0f3756b | ||
|
|
3d6fd49179 | ||
|
|
d843e3d562 | ||
|
|
d8d5474dcc | ||
|
|
788e41a315 | ||
|
|
1bcc641dae | ||
|
|
f8fed0f308 | ||
|
|
d1e6632e6a | ||
|
|
710295bd2f | ||
|
|
3e2d68cd32 | ||
|
|
f3788525ff | ||
|
|
39d247a238 | ||
|
|
b37144b0b2 | ||
|
|
2e0b33f754 | ||
|
|
adc3f79c23 | ||
|
|
7904b454ba | ||
|
|
d264da8f08 | ||
|
|
6abe78ff46 | ||
|
|
9a4aa4288c | ||
|
|
50e3ccfa2b | ||
|
|
69a3c33ceb | ||
|
|
649b1ae868 | ||
|
|
973cc12ca9 | ||
|
|
436de45dd4 | ||
|
|
5f186a2835 | ||
|
|
ecec94ee7e | ||
|
|
196995a1a7 | ||
|
|
3a714fd4ac | ||
|
|
2132ec0269 | ||
|
|
c47fdc9aa0 | ||
|
|
5c4cfb54ae | ||
|
|
cd153a1fb3 | ||
|
|
b0ab92c921 | ||
|
|
5007a69eee | ||
|
|
8a46e050e3 | ||
|
|
256fd12da5 | ||
|
|
8e36ad09b4 | ||
|
|
96a92503cb | ||
|
|
5253153dbb | ||
|
|
12c78e622b | ||
|
|
216bf2e867 | ||
|
|
a086686e9f | ||
|
|
6402004018 | ||
|
|
955f833120 | ||
|
|
f4476f25bd | ||
|
|
8960d5bcfa | ||
|
|
605738757d | ||
|
|
569613f2a4 | ||
|
|
cc182ea2f3 | ||
|
|
3f96b2da7a | ||
|
|
9e44f94176 | ||
|
|
f94a7c6d82 | ||
|
|
dbf40bbbb8 | ||
|
|
954aae931e | ||
|
|
0b1200bb49 | ||
|
|
646d473e8e | ||
|
|
6975cd1622 | ||
|
|
b7f9bf43c2 | ||
|
|
388b627f72 | ||
|
|
f9019ab116 | ||
|
|
07657aecf4 | ||
|
|
e65904eee3 | ||
|
|
89847d5684 | ||
|
|
dada98143c | ||
|
|
713efff78e | ||
|
|
585303ad66 | ||
|
|
a800ed094b | ||
|
|
84e47f4aaa | ||
|
|
46ff9ce765 | ||
|
|
e31eb199c5 | ||
|
|
fd46442188 | ||
|
|
9837b7926f | ||
|
|
5aeaa248d4 | ||
|
|
739f6c78ad | ||
|
|
b75b6b513b | ||
|
|
2a3b48edaf | ||
|
|
7c7b33a0f8 | ||
|
|
40d72d1865 | ||
|
|
cdc2e7d4fe | ||
|
|
2628663590 | ||
|
|
5cc77eb6fd | ||
|
|
a1aa9d79c0 | ||
|
|
8b21034b31 | ||
|
|
67ffd8e923 | ||
|
|
af7edf4dff | ||
|
|
0fd3b9f7af | ||
|
|
7ef9f4dfdd | ||
|
|
6ce507f39f | ||
|
|
34cab91e86 | ||
|
|
63de9bdba3 | ||
|
|
afb6e7dfc3 | ||
|
|
ad89d1c876 | ||
|
|
6b80ac6500 | ||
|
|
2257dc9baa | ||
|
|
a40d9f3c72 | ||
|
|
977e7ae105 | ||
|
|
bc0fc5df98 | ||
|
|
810c10a0e9 | ||
|
|
bb469005b2 | ||
|
|
89ade65ad6 | ||
|
|
128d3ef94c | ||
|
|
b7e06085c7 | ||
|
|
8875e25fe9 | ||
|
|
44f9739750 | ||
|
|
9e85a023c1 | ||
|
|
b6698e686a | ||
|
|
66bd570584 | ||
|
|
e2c5a3895b | ||
|
|
fe719c1bc1 | ||
|
|
89fe6505f9 | ||
|
|
4b5f780ff0 | ||
|
|
31659efe13 | ||
|
|
2bd3776ddb | ||
|
|
c874f16c02 | ||
|
|
ba91304636 | ||
|
|
42a0f46268 | ||
|
|
d84ff06f73 | ||
|
|
87f33a4644 | ||
|
|
2d69b05c77 | ||
|
|
2eb57ee5cd | ||
|
|
85c69c2a4a | ||
|
|
c20f38b89c | ||
|
|
bfe196236f | ||
|
|
d4c61d2628 | ||
|
|
deef47c923 | ||
|
|
c19d8994b9 | ||
|
|
2de6da25a8 | ||
|
|
f13e76d022 | ||
|
|
95e2bde15b | ||
|
|
5c0a41a6e0 | ||
|
|
6424928ba3 | ||
|
|
4c280e59d4 | ||
|
|
56b7400dac | ||
|
|
d095a1bb96 | ||
|
|
76a89039ad | ||
|
|
f90e509bf6 | ||
|
|
dd8902bfcd | ||
|
|
911af3f331 | ||
|
|
e2f083f885 | ||
|
|
e5a450349b | ||
|
|
7a20d69809 | ||
|
|
c187ae22e5 | ||
|
|
cb202a76df | ||
|
|
e1d139db2e | ||
|
|
51e695066a | ||
|
|
ce25fa4302 | ||
|
|
c403b61383 | ||
|
|
3ef7c25a16 | ||
|
|
442d211ee3 | ||
|
|
c0aad0a6d5 | ||
|
|
5903aea86f | ||
|
|
f4a68eae01 | ||
|
|
4a36b091f4 | ||
|
|
874ec8fc73 | ||
|
|
41ca67bf54 | ||
|
|
d72232f15b | ||
|
|
03c61685fb | ||
|
|
46284198f8 | ||
|
|
9916100835 | ||
|
|
bbb1da1a83 | ||
|
|
cf183288dd | ||
|
|
07075add3d | ||
|
|
338238d086 | ||
|
|
c6c74616d8 | ||
|
|
03bb729fea | ||
|
|
60460c025c | ||
|
|
f8a59446e8 | ||
|
|
a37c642127 | ||
|
|
4e61fb9cd3 | ||
|
|
b472c2df98 | ||
|
|
17f9242b58 | ||
|
|
bc67deee78 | ||
|
|
f57abae01e | ||
|
|
275b1d6897 | ||
|
|
e9a676951b | ||
|
|
b31d905704 | ||
|
|
c70c44b07b | ||
|
|
20583e3d15 | ||
|
|
9f4588cd0c | ||
|
|
b25e0f82b1 | ||
|
|
cae1c683aa | ||
|
|
57e7a5a34a | ||
|
|
230f1e1208 | ||
|
|
7bce70339f | ||
|
|
e1aaef7d4d | ||
|
|
1a1d154e14 | ||
|
|
384e5dd4c4 | ||
|
|
abfb147292 | ||
|
|
44eb03f78a | ||
|
|
857784747b | ||
|
|
7a2cb5e41c | ||
|
|
e662ed4adc | ||
|
|
712bdfc82f | ||
|
|
34bd47de79 | ||
|
|
fe57c13b51 | ||
|
|
f9df692056 | ||
|
|
f193b12059 | ||
|
|
2cd254954c | ||
|
|
4dab92ce69 | ||
|
|
d53f45d4e2 | ||
|
|
ca08614641 | ||
|
|
47adf4bce6 | ||
|
|
e69979d5a2 | ||
|
|
2ae68df41b | ||
|
|
7928cd20fb | ||
|
|
ad9049a49e | ||
|
|
dfcd60a9e2 | ||
|
|
0684df804d | ||
|
|
f8d6b84cb6 | ||
|
|
a980731bed | ||
|
|
b4e264251f | ||
|
|
8006a6cd82 | ||
|
|
a69db4169b | ||
|
|
5cd6b0c753 | ||
|
|
36ead3a720 | ||
|
|
bb339265fc | ||
|
|
bb4446e94c | ||
|
|
d2102671cd | ||
|
|
138e0c2301 | ||
|
|
37cfd289d8 | ||
|
|
9f3081580a | ||
|
|
2b6781bc65 | ||
|
|
a3248379db | ||
|
|
0774c8385c | ||
|
|
b2d30d68e7 | ||
|
|
82d10b882c | ||
|
|
24ae85fa56 | ||
|
|
1869aa3985 | ||
|
|
95b8ac5f62 | ||
|
|
0b4168cad4 | ||
|
|
3289472e31 | ||
|
|
4ad53339f6 | ||
|
|
a4d3da6a8e | ||
|
|
7954ea2525 | ||
|
|
bd1f0bcfd7 | ||
|
|
bc2e26d7ef | ||
|
|
fd80013917 | ||
|
|
f7d52564aa | ||
|
|
f7e8d1149a | ||
|
|
bd091caaf9 | ||
|
|
b455a1bf76 | ||
|
|
c0a3bcf9b3 | ||
|
|
5eedb5562f | ||
|
|
dc6c703741 | ||
|
|
16629d0b8e | ||
|
|
7f79210ed1 | ||
|
|
27a1a697e7 | ||
|
|
c1267004ef | ||
|
|
9cdd2214f9 | ||
|
|
fc29764911 | ||
|
|
989e9174c2 | ||
|
|
3993e12335 | ||
|
|
ac4d782937 | ||
|
|
32d18ca992 | ||
|
|
2df4b6c5d2 | ||
|
|
088d36da09 | ||
|
|
6f36faa4f9 | ||
|
|
3846384d56 | ||
|
|
331f141f63 | ||
|
|
62dd3d2a9d | ||
|
|
fa8a128e49 | ||
|
|
b10c308a5a | ||
|
|
e22c70e431 | ||
|
|
f4fe9e3421 | ||
|
|
da173615e4 | ||
|
|
dc6a17e092 | ||
|
|
f4180503c8 | ||
|
|
240d4193ae | ||
|
|
ac66e98ae9 | ||
|
|
d2935ffed0 | ||
|
|
c6e0fcea31 | ||
|
|
5d014d986b | ||
|
|
714994cad8 | ||
|
|
08fe61e058 | ||
|
|
0c8bed21ee | ||
|
|
98eb845f8c | ||
|
|
98300e3165 | ||
|
|
e22759d8f0 | ||
|
|
bf1411060e | ||
|
|
a4d142368b | ||
|
|
eb80f9b606 | ||
|
|
ae47b617e3 | ||
|
|
c116b8022e | ||
|
|
5b98e15613 | ||
|
|
e5b4011aa4 | ||
|
|
3125f93b3f | ||
|
|
f19c8e8c1d | ||
|
|
20779df686 | ||
|
|
752fba1691 | ||
|
|
637604d08f | ||
|
|
ba7b1d74d0 | ||
|
|
1a9989ade9 | ||
|
|
11844c987c | ||
|
|
8c45c88d15 | ||
|
|
c87bb2f239 | ||
|
|
32eeb9a0e0 | ||
|
|
df21fc8643 | ||
|
|
ffbdf31ac4 | ||
|
|
ccd9cc3dce | ||
|
|
81867402f6 | ||
|
|
4f5d61212b | ||
|
|
ef96123482 | ||
|
|
ee27ab0052 | ||
|
|
57f87ba083 | ||
|
|
a9bb72c6fd | ||
|
|
9506c2e597 | ||
|
|
32884357ff | ||
|
|
278ac08087 | ||
|
|
88204642b7 | ||
|
|
1401286910 | ||
|
|
12eb242224 | ||
|
|
8f36a02998 | ||
|
|
88f9e25f76 | ||
|
|
dba1c13954 | ||
|
|
df9faa1743 | ||
|
|
74fd975b57 | ||
|
|
ce85bcaee7 | ||
|
|
6eb6e806e7 | ||
|
|
6ed2a60978 | ||
|
|
fd04c14260 | ||
|
|
10a5273f07 | ||
|
|
bac3bf1a5f | ||
|
|
e3b684df21 | ||
|
|
e45b30d033 | ||
|
|
ad6fecefa8 | ||
|
|
3fdb2ccf55 | ||
|
|
29f5b55d42 | ||
|
|
5aef52e8c0 | ||
|
|
336e0cbf70 | ||
|
|
3cd06b0eb4 | ||
|
|
6bb46e3ecb | ||
|
|
127bc01857 | ||
|
|
a6975c1850 | ||
|
|
b2cb0725ac | ||
|
|
b974b144a8 | ||
|
|
bfb25fa47b | ||
|
|
3bb505d43f | ||
|
|
b135bcb9d9 | ||
|
|
4e97225424 | ||
|
|
0771cd8599 | ||
|
|
91d848f98a | ||
|
|
40edf8c6f5 | ||
|
|
e78562830f | ||
|
|
bef259a6eb | ||
|
|
39ce1bd8be | ||
|
|
6291881943 | ||
|
|
802fd94dad | ||
|
|
66f38a1b31 | ||
|
|
d3850a4da5 | ||
|
|
53a4355e60 | ||
|
|
18a616f57c | ||
|
|
32333eb627 | ||
|
|
19def41fdf | ||
|
|
44b9dce134 | ||
|
|
fa5a538fe5 | ||
|
|
5e3fd2253f | ||
|
|
9643c2c1e3 | ||
|
|
93f3614382 | ||
|
|
cbc8a7d679 | ||
|
|
290bc993a5 | ||
|
|
3694657eb6 | ||
|
|
79417e07ca | ||
|
|
dad95c873b | ||
|
|
626b35e1b0 | ||
|
|
5881ba43f8 | ||
|
|
fed7b97dcb | ||
|
|
c4458c9d9a | ||
|
|
7bae2a4547 | ||
|
|
aeb3e647d4 | ||
|
|
fe036cbe77 | ||
|
|
952ec65a40 | ||
|
|
b8788421d5 | ||
|
|
c2347db934 | ||
|
|
27ead5d4fa | ||
|
|
591ae700ce | ||
|
|
6ade7c0a8d | ||
|
|
b3745f2614 | ||
|
|
ca8a9c600a | ||
|
|
a0225507a0 | ||
|
|
d39a88d63f | ||
|
|
e5d353d0a7 | ||
|
|
de422c822d | ||
|
|
4d3326b542 | ||
|
|
1b82138142 | ||
|
|
208ff8e350 | ||
|
|
ec54b36e05 | ||
|
|
38e8522cbf | ||
|
|
52f8687397 | ||
|
|
43600ffcf8 | ||
|
|
938d2d9e6e | ||
|
|
9368dbe0e7 | ||
|
|
fe3290601a | ||
|
|
e7173e1d62 | ||
|
|
8aafe64397 | ||
|
|
2140303fcc | ||
|
|
b80ded63ca | ||
|
|
7be2521a31 | ||
|
|
15b9d54a32 | ||
|
|
bc1a4e12ad | ||
|
|
67419e8d0a | ||
|
|
849aaf7435 | ||
|
|
a89ee8c406 | ||
|
|
0c6f172e75 | ||
|
|
a67349b076 | ||
|
|
f9b68a5d17 | ||
|
|
c7910b51a1 | ||
|
|
1f99710b21 | ||
|
|
5e558c361b | ||
|
|
5f39efcdfd | ||
|
|
037edf1215 | ||
|
|
37125866ca | ||
|
|
421e73b87c | ||
|
|
0d8de289dd | ||
|
|
00916dec38 | ||
|
|
c115e5677e | ||
|
|
56047c1c83 | ||
|
|
09d85631dc | ||
|
|
f25e4dc3ed | ||
|
|
a3a7c2d24e | ||
|
|
0126168472 | ||
|
|
e9f795c5ce | ||
|
|
0c7e1889e4 | ||
|
|
8b2bec700a | ||
|
|
125267544e | ||
|
|
0d55ed3600 | ||
|
|
ad0cd6939a | ||
|
|
a1244d7bd3 | ||
|
|
33adb334cd | ||
|
|
ef87a8a1f0 | ||
|
|
5223a80ab8 | ||
|
|
a595c84f7e | ||
|
|
adcfcc1178 | ||
|
|
b158dbcf79 | ||
|
|
ab3832f3e7 | ||
|
|
9bf415f749 | ||
|
|
a2bda9e5f1 | ||
|
|
0195725563 | ||
|
|
ec1170bd37 | ||
|
|
66c67dbe73 | ||
|
|
e5d8d8234d | ||
|
|
16ae2c1809 | ||
|
|
5c5e879c2c | ||
|
|
4771716ab2 | ||
|
|
b156585739 | ||
|
|
7a77b5c419 | ||
|
|
9872b594bf | ||
|
|
e6c88db0a0 | ||
|
|
257280a050 | ||
|
|
520103df78 | ||
|
|
3e3787de15 | ||
|
|
0c824d5ef1 | ||
|
|
c0e989b17c | ||
|
|
5218b3af82 | ||
|
|
ef0a91da27 | ||
|
|
8412181746 | ||
|
|
400ee2aa57 | ||
|
|
05b8466f87 | ||
|
|
6061c691e6 | ||
|
|
3ac967a7b6 | ||
|
|
19962f6b6a | ||
|
|
f7703dbca3 | ||
|
|
74a9eedb93 | ||
|
|
6df104b275 | ||
|
|
b27453d8d8 | ||
|
|
4470ee4ccf | ||
|
|
df27fd1e9c | ||
|
|
97c68810e0 | ||
|
|
8a86acf75d | ||
|
|
160e479f8d | ||
|
|
d738acf638 | ||
|
|
84d92aa3c7 | ||
|
|
dd01cabcdc | ||
|
|
e196adb98c | ||
|
|
c383c7e2c1 | ||
|
|
958bb5285d | ||
|
|
f0317ae70b | ||
|
|
591941bd39 | ||
|
|
e90769c869 | ||
|
|
256bbb1a8a | ||
|
|
2c7c956be9 | ||
|
|
fe81bba08d | ||
|
|
564de07963 | ||
|
|
84cf6fbe83 | ||
|
|
d9160f19c0 | ||
|
|
ba0a03a8ba | ||
|
|
b0f04d925a | ||
|
|
7b78e68727 | ||
|
|
ec53b8b66a | ||
|
|
6e949bf951 | ||
|
|
86fb669fd3 | ||
|
|
7123956ecd | ||
|
|
46cf6b77cf | ||
|
|
a52bc44f5a | ||
|
|
acb63a57fa | ||
|
|
5b08277ce4 | ||
|
|
5dc56df64e | ||
|
|
33c4d64b62 | ||
|
|
25de6825df | ||
|
|
0b60201a1e | ||
|
|
cfea99c4ee | ||
|
|
cea41a544e | ||
|
|
8371a060a0 | ||
|
|
7ed140cea7 | ||
|
|
cb97c2184e | ||
|
|
0b4fcc83bd | ||
|
|
514359e556 | ||
|
|
55b9d02a99 | ||
|
|
fc9a65be2b | ||
|
|
49dff97d9c | ||
|
|
4efb0b78fa | ||
|
|
c9fe8fde59 | ||
|
|
74d54946bf | ||
|
|
16462292e1 | ||
|
|
7ef1e1ef9d | ||
|
|
20d80311f0 | ||
|
|
f1a1f53f72 | ||
|
|
3acc42c5b3 | ||
|
|
c766bd077b | ||
|
|
54320c5b09 | ||
|
|
291b71ea3b | ||
|
|
356515222a | ||
|
|
688e589e0c | ||
|
|
6c98201aa4 | ||
|
|
d4b10eb9f5 | ||
|
|
728d56e74d | ||
|
|
a9f4038fcd | ||
|
|
77f1d4b0f1 | ||
|
|
d78577c810 | ||
|
|
5fb6b2eaf7 | ||
|
|
404caa111a | ||
|
|
b838468500 | ||
|
|
f2235be1d3 | ||
|
|
6ec45b10f1 | ||
|
|
d9879d8026 | ||
|
|
d487b2f927 | ||
|
|
66e5e14bac | ||
|
|
7e4668859b | ||
|
|
92d038062e | ||
|
|
2972bceb90 | ||
|
|
cb0a60a0fe | ||
|
|
3ee91e15ff | ||
|
|
ef47a73382 | ||
|
|
dc515e5ac5 | ||
|
|
56763d4288 | ||
|
|
ad9fa73301 | ||
|
|
10dd049912 | ||
|
|
4209f1cbfd | ||
|
|
ee83e874a8 | ||
|
|
27ed73e3e3 | ||
|
|
e41c0532e3 | ||
|
|
eeb7274d65 | ||
|
|
eb0dcf6063 | ||
|
|
83be0735cd | ||
|
|
fe4ba51d1a | ||
|
|
adf575b75e | ||
|
|
e5426f74a8 | ||
|
|
f5212d3b79 | ||
|
|
3d09c4be75 | ||
|
|
f2db15873d | ||
|
|
7c663de6c9 | ||
|
|
c14bbcdbf2 | ||
|
|
1be4c1935a | ||
|
|
764b1aa5f8 | ||
|
|
d13b07ba59 | ||
|
|
028afab908 | ||
|
|
55dfae2a52 | ||
|
|
994324e19c | ||
|
|
b81c0d869c | ||
|
|
f14f04c5ea | ||
|
|
9c86da1403 | ||
|
|
cb611b5ed0 | ||
|
|
891269ef39 | ||
|
|
ab171a1d6d | ||
|
|
a56738324a | ||
|
|
da61b8e7c9 | ||
|
|
d6d58bc938 | ||
|
|
e42cb43ca5 | ||
|
|
ca541c7e4a | ||
|
|
96e14424f0 | ||
|
|
47830896e8 | ||
|
|
5fd4b4afae | ||
|
|
dae9f6d3c2 | ||
|
|
8e1210f96e | ||
|
|
56aa683f28 | ||
|
|
1b9a6d7ad8 | ||
|
|
f591c4db56 | ||
|
|
371fa51e82 | ||
|
|
a927ed2da4 | ||
|
|
a55675acf8 | ||
|
|
25dd3d476a | ||
|
|
3ff5f40bdb | ||
|
|
689ded0413 | ||
|
|
327ed157ef | ||
|
|
c819238da9 | ||
|
|
477afa8711 | ||
|
|
bd272e0b3c | ||
|
|
1067595b5c | ||
|
|
14c232e3c4 | ||
|
|
57f5fb1f4f | ||
|
|
bcddfb786d | ||
|
|
20db1738fa | ||
|
|
b23d81f825 | ||
|
|
bc15ceaba1 | ||
|
|
6f17d0817b | ||
|
|
a1cde03b20 | ||
|
|
cfce23950a | ||
|
|
64740249ab | ||
|
|
126f42de06 | ||
|
|
d94e3a81eb | ||
|
|
70d0235770 | ||
|
|
30b5493fd6 | ||
|
|
4f6362515f | ||
|
|
dbbe9419e5 | ||
|
|
188bae142b | ||
|
|
7c2b12ebd7 | ||
|
|
ef8e35e39b | ||
|
|
975accbe1d | ||
|
|
aaa27d0a34 | ||
|
|
9302ce0036 | ||
|
|
0aab3c97a0 | ||
|
|
8e731337ba | ||
|
|
b294db5aed | ||
|
|
8d766a2ca9 | ||
|
|
f2ae16e71d | ||
|
|
ac281476c8 | ||
|
|
1b1c8d31a9 | ||
|
|
4b587aaf99 | ||
|
|
016301508e | ||
|
|
6744726089 | ||
|
|
0a89f88b89 | ||
|
|
69fac8ea58 | ||
|
|
a51104e844 | ||
|
|
943aaf84e5 | ||
|
|
e8bde03a50 | ||
|
|
75b13caf0b | ||
|
|
0f231f0e76 | ||
|
|
5d99fa0940 | ||
|
|
649388188b | ||
|
|
9fa1873a65 | ||
|
|
f2057dd43d | ||
|
|
eeffbbb43c | ||
|
|
aaa0105f75 | ||
|
|
f29a42721f | ||
|
|
079d317ade | ||
|
|
6f1fd12265 | ||
|
|
e16b57aa05 | ||
|
|
fb30f65951 | ||
|
|
a47aaae078 | ||
|
|
7117614ee5 | ||
|
|
e26aec96b0 | ||
|
|
c60d104056 | ||
|
|
e6ff8c92a0 | ||
|
|
9bce364b3c | ||
|
|
cbaa2b5773 | ||
|
|
0453ed8235 | ||
|
|
a341adb7f3 | ||
|
|
4c88ac69f2 | ||
|
|
85c237bc1d | ||
|
|
35d48cc88c | ||
|
|
957b7115fe | ||
|
|
82eedbd622 | ||
|
|
b930b0ef5a | ||
|
|
ad313c9d49 | ||
|
|
06035c0f4e | ||
|
|
e1384f6618 | ||
|
|
3acb86805b | ||
|
|
bf0af1cd3d | ||
|
|
c77d11f1c7 | ||
|
|
d279d145d5 | ||
|
|
fc7905653e | ||
|
|
660282e82f | ||
|
|
77602dbb93 | ||
|
|
a3e6896a43 | ||
|
|
702ce446df | ||
|
|
8ae77e955e | ||
|
|
783924e671 | ||
|
|
93304e5f58 | ||
|
|
917373ee55 | ||
|
|
7a98ad50b4 | ||
|
|
982058cc19 | ||
|
|
576beaa6a6 | ||
|
|
6eb22c5db2 | ||
|
|
72a0d78b3c | ||
|
|
13d08af054 | ||
|
|
80a7ae9845 | ||
|
|
6c30a7b2eb | ||
|
|
76b72338da | ||
|
|
a39e78d42d | ||
|
|
4550dccb84 | ||
|
|
01ce09f343 | ||
|
|
71dca67ca2 | ||
|
|
327f9baccf | ||
|
|
a98b866a66 | ||
|
|
3aabba7535 | ||
|
|
c22cfa255b | ||
|
|
af211b3d71 | ||
|
|
6bb3463e7c | ||
|
|
8b151d12b9 | ||
|
|
ecb6dc3679 | ||
|
|
49a223a17d | ||
|
|
e5cfdc648c | ||
|
|
9f9f70aade | ||
|
|
e91c04f586 | ||
|
|
277fa6c12d | ||
|
|
ca3050ec3d | ||
|
|
1b3ced152b | ||
|
|
97031f9133 | ||
|
|
c92506e2e7 | ||
|
|
65a9772adf | ||
|
|
1e07ee6cc4 | ||
|
|
01a130273f | ||
|
|
3c710219a1 | ||
|
|
2ba285a544 | ||
|
|
72ae7638bc | ||
|
|
3bfad5ca73 | ||
|
|
668d02846d | ||
|
|
781f31d2b8 | ||
|
|
df28db0066 | ||
|
|
20183f3860 | ||
|
|
48edf1757b | ||
|
|
2645e88b0c | ||
|
|
db121049b3 | ||
|
|
8058cdbc0e | ||
|
|
31d357284a | ||
|
|
4ee77ce026 | ||
|
|
8373129588 | ||
|
|
9a3c6f236d | ||
|
|
55ba81fee5 | ||
|
|
bc5159a1f5 | ||
|
|
af007c7189 | ||
|
|
dc79d07303 | ||
|
|
79167c7577 | ||
|
|
08dd057864 | ||
|
|
fee3f288c0 | ||
|
|
b22bef5cfb | ||
|
|
7ad5523113 | ||
|
|
460b5824c3 | ||
|
|
b0a28b1e80 | ||
|
|
ca6535f210 | ||
|
|
1155008719 | ||
|
|
d07594ed59 | ||
|
|
5efda3eda9 | ||
|
|
4b137efdbd | ||
|
|
383d582b47 | ||
|
|
6eacedc443 | ||
|
|
b1a5bb593c | ||
|
|
9369c6549a | ||
|
|
c7731a3b93 | ||
|
|
24706c163a | ||
|
|
a276dc47e0 | ||
|
|
e55f8a61cd | ||
|
|
c8bcca0845 | ||
|
|
cb6892d2ed | ||
|
|
43eda8d878 | ||
|
|
a2534e03bd | ||
|
|
dc5b955930 | ||
|
|
5de7896ffb | ||
|
|
01af45d14a | ||
|
|
cc9f3ea938 | ||
|
|
ff43de695e | ||
|
|
8bc717a55c | ||
|
|
d09222c900 | ||
|
|
87cdb81fae | ||
|
|
38eb1d548a | ||
|
|
e0960f6288 | ||
|
|
74403f2003 | ||
|
|
b2c83714d1 | ||
|
|
2c21672de6 | ||
|
|
f7dc21773d | ||
|
|
3e457e4edf | ||
|
|
03572d175f | ||
|
|
c4894d6092 | ||
|
|
3fb0383df4 | ||
|
|
ee36763f9d | ||
|
|
955c5549ae | ||
|
|
4a34514b21 | ||
|
|
805d9f22ce | ||
|
|
20f29327e9 | ||
|
|
018b5039e7 | ||
|
|
d6aeb767a0 | ||
|
|
b5d3d4741f | ||
|
|
85c747d444 | ||
|
|
927e6d89d7 | ||
|
|
3d87f23bf5 | ||
|
|
45845d4a2a | ||
|
|
00ef129b2a | ||
|
|
06b219217b | ||
|
|
789910d8eb | ||
|
|
a8e6a0763d | ||
|
|
e1386ba604 | ||
|
|
83deecb9e9 | ||
|
|
d8dcb8f6e0 | ||
|
|
5fa31eaead | ||
|
|
d245201614 | ||
|
|
a5b84a47b0 | ||
|
|
552b2f0635 | ||
|
|
0b3badf3d8 | ||
|
|
cea3e224aa | ||
|
|
1eaf0e1c63 | ||
|
|
2cda982345 | ||
|
|
c9734b6d7b | ||
|
|
fd01377f12 | ||
|
|
8d2fc88336 | ||
|
|
092bf07cbf | ||
|
|
5145a8e8be | ||
|
|
b495d36fa5 | ||
|
|
3bdeaa4a6f | ||
|
|
d1f58fed4c | ||
|
|
12e918bd31 | ||
|
|
637f445c3f | ||
|
|
d0e4cf5895 | ||
|
|
e0bf8b2aab | ||
|
|
483c06b4ab | ||
|
|
f4a3b31415 | ||
|
|
5c7e309d13 | ||
|
|
7a72b2d558 | ||
|
|
c75b21a510 | ||
|
|
a9f318d523 | ||
|
|
1dca0bd8d7 | ||
|
|
f3165a716a | ||
|
|
9f45eba6f6 | ||
|
|
ecaa7dad49 | ||
|
|
ee84e34570 | ||
|
|
442be2ac02 | ||
|
|
22d600e8c0 | ||
|
|
e160018826 | ||
|
|
d1a02bd3e9 | ||
|
|
380fb986b6 | ||
|
|
e7f794531e | ||
|
|
992023288f | ||
|
|
ef5a36dd69 | ||
|
|
3ab90db6ee | ||
|
|
e26be9cb8a | ||
|
|
bba555bb08 | ||
|
|
4b0af73dd2 | ||
|
|
da72b8c385 | ||
|
|
44079b7176 | ||
|
|
19c36fe4c9 | ||
|
|
a742d10c54 | ||
|
|
6bd27038cc | ||
|
|
5df757a403 | ||
|
|
38f5d1b18e | ||
|
|
5f75d4c099 | ||
|
|
319a0d65af | ||
|
|
3d2680a102 | ||
|
|
c36fb5919a | ||
|
|
46d3f4369e | ||
|
|
c2b3920b50 | ||
|
|
6e7323e3e8 | ||
|
|
e98b0371e5 | ||
|
|
f085419055 | ||
|
|
1fedbded62 | ||
|
|
c8258171ca | ||
|
|
007ee0da8e | ||
|
|
5e1ac9ce87 | ||
|
|
a7cd08603e | ||
|
|
854cd1a517 | ||
|
|
cf8c74cb07 | ||
|
|
23565ebe62 | ||
|
|
8467bce2a6 | ||
|
|
e6225d70a1 | ||
|
|
a69de8be40 | ||
|
|
649654207f | ||
|
|
3123502f4c | ||
|
|
17d54cffbb | ||
|
|
bddee7c38e | ||
|
|
6f9c311285 | ||
|
|
0cfa6a8981 | ||
|
|
d5516a4ca9 | ||
|
|
d2b793057e | ||
|
|
b2a409fd4d | ||
|
|
4ba237c5d8 | ||
|
|
f5ef02d4cc | ||
|
|
ec2255764a | ||
|
|
1a8e92c922 | ||
|
|
5c1891ec9f | ||
|
|
83265b7f75 | ||
|
|
5364a10033 | ||
|
|
c2a46e4aa3 | ||
|
|
bae5ce0bfa | ||
|
|
cc5edb720c | ||
|
|
e17c2ef698 | ||
|
|
61b74f9a5b | ||
|
|
0cd83eadc0 | ||
|
|
1757c45490 | ||
|
|
d85f98d2a9 | ||
|
|
9e123011c2 | ||
|
|
774c4d0d6f | ||
|
|
7332679678 | ||
|
|
bb6f727f25 | ||
|
|
586d2a41ce | ||
|
|
91dff61008 | ||
|
|
8203383c03 | ||
|
|
a3c88a0de5 | ||
|
|
fff0aec720 | ||
|
|
b73786c6d5 | ||
|
|
67eeccb31f | ||
|
|
266ca9318d | ||
|
|
3e97299a46 | ||
|
|
eacc42fedd | ||
|
|
db3e8edacd | ||
|
|
6e41634295 | ||
|
|
ef3c2d86d3 | ||
|
|
780308c194 | ||
|
|
696fd690ae | ||
|
|
d323501c7f | ||
|
|
66d8b2c18a | ||
|
|
6d8a415b4d | ||
|
|
dad268a686 | ||
|
|
e7acc2fddf | ||
|
|
6fb17a813c | ||
|
|
11ede9f872 | ||
|
|
6ac1c1c886 | ||
|
|
01c0ab4f06 | ||
|
|
7713f35326 | ||
|
|
7220b09ff9 | ||
|
|
b7298ef51a | ||
|
|
16b10b026b | ||
|
|
9b18c073b6 | ||
|
|
dd89e705f2 | ||
|
|
56b86bbfca | ||
|
|
7e2aafcc76 | ||
|
|
11c774b04f | ||
|
|
6ba926381b | ||
|
|
af55e179c7 | ||
|
|
18a42e4b38 | ||
|
|
a10ccadb54 | ||
|
|
15fee582cc | ||
|
|
43408634bb | ||
|
|
d47fce6ce7 | ||
|
|
9e64267867 | ||
|
|
7ae5785447 | ||
|
|
ef8d3f684f | ||
|
|
cc6e3d14ce | ||
|
|
83f44b1ac1 | ||
|
|
1f470eadd1 | ||
|
|
005b01bd9a | ||
|
|
6f67367b57 | ||
|
|
9ee0600a7f | ||
|
|
30cc7c847e | ||
|
|
a5bb24b886 | ||
|
|
f02d810af8 | ||
|
|
55f6b6a6ab | ||
|
|
b999ee60aa | ||
|
|
85afd3ef14 | ||
|
|
1907030d89 | ||
|
|
361a5eac7e | ||
|
|
fecb41d2ef | ||
|
|
4cdb641e7b | ||
|
|
efa2dff681 | ||
|
|
31a7b7d24e | ||
|
|
af8cc4dc4a | ||
|
|
8eb60f5624 | ||
|
|
791ea89b88 | ||
|
|
c572760a66 | ||
|
|
69fc19f7e0 | ||
|
|
b939c24b3d | ||
|
|
3eb494dbe3 | ||
|
|
d6a66c83c2 | ||
|
|
582a9a5db8 | ||
|
|
0afbc19ffb | ||
|
|
ac9290f985 | ||
|
|
a133ba1998 | ||
|
|
5657738f7e | ||
|
|
d310acc1eb | ||
|
|
2b88f10b04 | ||
|
|
883ba7aa90 | ||
|
|
28f55deaae | ||
|
|
40407930d5 | ||
|
|
674b71b535 | ||
|
|
677d9c47ac | ||
|
|
2638ab98ad | ||
|
|
bc3068c2f9 | ||
|
|
2bde9bea1c | ||
|
|
502f2f040d | ||
|
|
041d4d666e | ||
|
|
c0c10a97e7 | ||
|
|
5a7c50027f | ||
|
|
88b5065e7d | ||
|
|
b690008192 | ||
|
|
2d6bc9536c | ||
|
|
01dc6b2f0e | ||
|
|
d8aa2d0a9e | ||
|
|
19bb97d24d | ||
|
|
9f4f168804 | ||
|
|
82e133b382 | ||
|
|
cf3083d68e | ||
|
|
e796cdbb27 | ||
|
|
2d44582f88 | ||
|
|
2a61344c03 | ||
|
|
77c6aad1b5 | ||
|
|
b60a897265 | ||
|
|
fdd41c706a | ||
|
|
d68cfeed6e | ||
|
|
14911e0d22 | ||
|
|
9503434d53 | ||
|
|
c3c9e955e5 | ||
|
|
72d5db92a8 | ||
|
|
3f302c8d47 | ||
|
|
04a769bb37 | ||
|
|
f9d4a1c1d8 | ||
|
|
3e7db46195 | ||
|
|
e52aca4837 | ||
|
|
5ec503bd6f | ||
|
|
49be805001 | ||
|
|
94596388f7 | ||
|
|
5c4980c6e0 | ||
|
|
6d157f0b3e | ||
|
|
c3d5fdff64 | ||
|
|
d6cbdbd6aa | ||
|
|
d7b8fb3113 | ||
|
|
45044c2d75 | ||
|
|
a9f260d135 | ||
|
|
072b3b9d8c | ||
|
|
ae7f59e249 | ||
|
|
450b4e16b2 | ||
|
|
c48ffa24be | ||
|
|
7f0c0a0922 | ||
|
|
bce1c62308 | ||
|
|
9b3aa3451e | ||
|
|
436c0b58db | ||
|
|
7ac62822cb | ||
|
|
af8ae83ea0 | ||
|
|
0bcecae2a3 | ||
|
|
bd130315b6 | ||
|
|
504711647e | ||
|
|
a9a016d7b1 | ||
|
|
ab12b23e6f | ||
|
|
797bdbd998 | ||
|
|
1c45d37348 | ||
|
|
b521255ec9 | ||
|
|
75ea001bfe | ||
|
|
ff2fb9196f | ||
|
|
ccc11e5680 | ||
|
|
fbded9cdac | ||
|
|
5d3414a40b | ||
|
|
28473e919f | ||
|
|
69636d2453 | ||
|
|
8a63b35f44 |
2
.github/ISSUE_TEMPLATE.md
vendored
2
.github/ISSUE_TEMPLATE.md
vendored
@@ -24,7 +24,7 @@ explain why.
|
|||||||
- **Version of Ansible** (`ansible --version`):
|
- **Version of Ansible** (`ansible --version`):
|
||||||
|
|
||||||
|
|
||||||
**Kargo version (commit) (`git rev-parse --short HEAD`):**
|
**Kubespray version (commit) (`git rev-parse --short HEAD`):**
|
||||||
|
|
||||||
|
|
||||||
**Network plugin used**:
|
**Network plugin used**:
|
||||||
|
|||||||
84
.gitignore
vendored
84
.gitignore
vendored
@@ -1,18 +1,96 @@
|
|||||||
.vagrant
|
.vagrant
|
||||||
*.retry
|
*.retry
|
||||||
inventory/vagrant_ansible_inventory
|
inventory/vagrant_ansible_inventory
|
||||||
|
inventory/credentials/
|
||||||
inventory/group_vars/fake_hosts.yml
|
inventory/group_vars/fake_hosts.yml
|
||||||
inventory/host_vars/
|
inventory/host_vars/
|
||||||
temp
|
temp
|
||||||
.idea
|
.idea
|
||||||
.tox
|
.tox
|
||||||
.cache
|
.cache
|
||||||
*.egg-info
|
*.bak
|
||||||
*.pyc
|
|
||||||
*.pyo
|
|
||||||
*.tfstate
|
*.tfstate
|
||||||
*.tfstate.backup
|
*.tfstate.backup
|
||||||
|
contrib/terraform/aws/credentials.tfvars
|
||||||
**/*.sw[pon]
|
**/*.sw[pon]
|
||||||
/ssh-bastion.conf
|
/ssh-bastion.conf
|
||||||
**/*.sw[pon]
|
**/*.sw[pon]
|
||||||
vagrant/
|
vagrant/
|
||||||
|
|
||||||
|
# Byte-compiled / optimized / DLL files
|
||||||
|
__pycache__/
|
||||||
|
*.py[cod]
|
||||||
|
*$py.class
|
||||||
|
|
||||||
|
# Distribution / packaging
|
||||||
|
.Python
|
||||||
|
inventory/*/artifacts/
|
||||||
|
env/
|
||||||
|
build/
|
||||||
|
credentials/
|
||||||
|
develop-eggs/
|
||||||
|
dist/
|
||||||
|
downloads/
|
||||||
|
eggs/
|
||||||
|
.eggs/
|
||||||
|
parts/
|
||||||
|
sdist/
|
||||||
|
var/
|
||||||
|
*.egg-info/
|
||||||
|
.installed.cfg
|
||||||
|
*.egg
|
||||||
|
|
||||||
|
# PyInstaller
|
||||||
|
# Usually these files are written by a python script from a template
|
||||||
|
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||||
|
*.manifest
|
||||||
|
*.spec
|
||||||
|
|
||||||
|
# Installer logs
|
||||||
|
pip-log.txt
|
||||||
|
pip-delete-this-directory.txt
|
||||||
|
|
||||||
|
# Unit test / coverage reports
|
||||||
|
htmlcov/
|
||||||
|
.tox/
|
||||||
|
.coverage
|
||||||
|
.coverage.*
|
||||||
|
.cache
|
||||||
|
nosetests.xml
|
||||||
|
coverage.xml
|
||||||
|
*,cover
|
||||||
|
.hypothesis/
|
||||||
|
|
||||||
|
# Translations
|
||||||
|
*.mo
|
||||||
|
*.pot
|
||||||
|
|
||||||
|
# Django stuff:
|
||||||
|
*.log
|
||||||
|
local_settings.py
|
||||||
|
|
||||||
|
# Flask stuff:
|
||||||
|
instance/
|
||||||
|
.webassets-cache
|
||||||
|
|
||||||
|
# Scrapy stuff:
|
||||||
|
.scrapy
|
||||||
|
|
||||||
|
# Sphinx documentation
|
||||||
|
docs/_build/
|
||||||
|
|
||||||
|
# PyBuilder
|
||||||
|
target/
|
||||||
|
|
||||||
|
# IPython Notebook
|
||||||
|
.ipynb_checkpoints
|
||||||
|
|
||||||
|
# pyenv
|
||||||
|
.python-version
|
||||||
|
|
||||||
|
# dotenv
|
||||||
|
.env
|
||||||
|
|
||||||
|
# virtualenv
|
||||||
|
venv/
|
||||||
|
ENV/
|
||||||
|
|||||||
652
.gitlab-ci.yml
652
.gitlab-ci.yml
@@ -1,14 +1,31 @@
|
|||||||
stages:
|
stages:
|
||||||
- moderator
|
|
||||||
- unit-tests
|
- unit-tests
|
||||||
- deploy-gce-part1
|
- moderator
|
||||||
- deploy-gce-part2
|
- deploy-part1
|
||||||
- deploy-gce-special
|
- deploy-part2
|
||||||
|
- deploy-special
|
||||||
|
|
||||||
variables:
|
variables:
|
||||||
FAILFASTCI_NAMESPACE: 'kargo-ci'
|
FAILFASTCI_NAMESPACE: 'kargo-ci'
|
||||||
|
GITLAB_REPOSITORY: 'kargo-ci/kubernetes-incubator__kubespray'
|
||||||
# DOCKER_HOST: tcp://localhost:2375
|
# DOCKER_HOST: tcp://localhost:2375
|
||||||
ANSIBLE_FORCE_COLOR: "true"
|
ANSIBLE_FORCE_COLOR: "true"
|
||||||
|
MAGIC: "ci check this"
|
||||||
|
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
|
||||||
|
CI_TEST_VARS: "./tests/files/${CI_JOB_NAME}.yml"
|
||||||
|
GS_ACCESS_KEY_ID: $GS_KEY
|
||||||
|
GS_SECRET_ACCESS_KEY: $GS_SECRET
|
||||||
|
CONTAINER_ENGINE: docker
|
||||||
|
SSH_USER: root
|
||||||
|
GCE_PREEMPTIBLE: "false"
|
||||||
|
ANSIBLE_KEEP_REMOTE_FILES: "1"
|
||||||
|
ANSIBLE_CONFIG: ./tests/ansible.cfg
|
||||||
|
ANSIBLE_INVENTORY: ./inventory/sample/${CI_JOB_NAME}-${BUILD_NUMBER}.ini
|
||||||
|
IDEMPOT_CHECK: "false"
|
||||||
|
RESET_CHECK: "false"
|
||||||
|
UPGRADE_TEST: "false"
|
||||||
|
KUBEADM_ENABLED: "false"
|
||||||
|
LOG_LEVEL: "-vv"
|
||||||
|
|
||||||
# asia-east1-a
|
# asia-east1-a
|
||||||
# asia-northeast1-a
|
# asia-northeast1-a
|
||||||
@@ -18,18 +35,14 @@ variables:
|
|||||||
# us-west1-a
|
# us-west1-a
|
||||||
|
|
||||||
before_script:
|
before_script:
|
||||||
- pip install ansible==2.2.1.0
|
- /usr/bin/python -m pip install -r tests/requirements.txt
|
||||||
- pip install netaddr
|
|
||||||
- pip install apache-libcloud==0.20.1
|
|
||||||
- pip install boto==2.9.0
|
|
||||||
- mkdir -p /.ssh
|
- mkdir -p /.ssh
|
||||||
- cp tests/ansible.cfg .
|
|
||||||
|
|
||||||
.job: &job
|
.job: &job
|
||||||
tags:
|
tags:
|
||||||
- kubernetes
|
- kubernetes
|
||||||
- docker
|
- docker
|
||||||
image: quay.io/ant31/kargo:master
|
image: quay.io/kubespray/kubespray:latest
|
||||||
|
|
||||||
.docker_service: &docker_service
|
.docker_service: &docker_service
|
||||||
services:
|
services:
|
||||||
@@ -42,29 +55,17 @@ before_script:
|
|||||||
.gce_variables: &gce_variables
|
.gce_variables: &gce_variables
|
||||||
GCE_USER: travis
|
GCE_USER: travis
|
||||||
SSH_USER: $GCE_USER
|
SSH_USER: $GCE_USER
|
||||||
TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
|
|
||||||
CONTAINER_ENGINE: docker
|
|
||||||
PRIVATE_KEY: $GCE_PRIVATE_KEY
|
|
||||||
GS_ACCESS_KEY_ID: $GS_KEY
|
|
||||||
GS_SECRET_ACCESS_KEY: $GS_SECRET
|
|
||||||
CLOUD_MACHINE_TYPE: "g1-small"
|
CLOUD_MACHINE_TYPE: "g1-small"
|
||||||
ANSIBLE_KEEP_REMOTE_FILES: "1"
|
CI_PLATFORM: "gce"
|
||||||
ANSIBLE_CONFIG: ./tests/ansible.cfg
|
PRIVATE_KEY: $GCE_PRIVATE_KEY
|
||||||
BOOTSTRAP_OS: none
|
|
||||||
DOWNLOAD_LOCALHOST: "false"
|
|
||||||
DOWNLOAD_RUN_ONCE: "false"
|
|
||||||
IDEMPOT_CHECK: "false"
|
|
||||||
RESET_CHECK: "false"
|
|
||||||
UPGRADE_TEST: "false"
|
|
||||||
RESOLVCONF_MODE: docker_dns
|
|
||||||
LOG_LEVEL: "-vv"
|
|
||||||
ETCD_DEPLOYMENT: "docker"
|
|
||||||
KUBELET_DEPLOYMENT: "docker"
|
|
||||||
VAULT_DEPLOYMENT: "docker"
|
|
||||||
WEAVE_CPU_LIMIT: "100m"
|
|
||||||
MAGIC: "ci check this"
|
|
||||||
|
|
||||||
.gce: &gce
|
.do_variables: &do_variables
|
||||||
|
PRIVATE_KEY: $DO_PRIVATE_KEY
|
||||||
|
CI_PLATFORM: "do"
|
||||||
|
SSH_USER: root
|
||||||
|
|
||||||
|
|
||||||
|
.testcases: &testcases
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *docker_service
|
<<: *docker_service
|
||||||
cache:
|
cache:
|
||||||
@@ -74,331 +75,271 @@ before_script:
|
|||||||
- $HOME/.cache
|
- $HOME/.cache
|
||||||
before_script:
|
before_script:
|
||||||
- docker info
|
- docker info
|
||||||
- pip install ansible==2.2.1.0
|
- /usr/bin/python -m pip install -r requirements.txt
|
||||||
- pip install netaddr
|
- /usr/bin/python -m pip install -r tests/requirements.txt
|
||||||
- pip install apache-libcloud==0.20.1
|
|
||||||
- pip install boto==2.9.0
|
|
||||||
- mkdir -p /.ssh
|
- mkdir -p /.ssh
|
||||||
- mkdir -p $HOME/.ssh
|
- mkdir -p $HOME/.ssh
|
||||||
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
|
|
||||||
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
|
|
||||||
- echo $GCE_CREDENTIALS > $HOME/.ssh/gce.json
|
|
||||||
- chmod 400 $HOME/.ssh/id_rsa
|
|
||||||
- ansible-playbook --version
|
- ansible-playbook --version
|
||||||
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
|
- export PYPATH=$([[ ! "$CI_JOB_NAME" =~ "coreos" ]] && echo /usr/bin/python || echo /opt/bin/python)
|
||||||
|
- echo "CI_JOB_NAME is $CI_JOB_NAME"
|
||||||
|
- echo "PYPATH is $PYPATH"
|
||||||
script:
|
script:
|
||||||
- pwd
|
- pwd
|
||||||
- ls
|
- ls
|
||||||
- echo ${PWD}
|
- echo ${PWD}
|
||||||
- >
|
- echo "${STARTUP_SCRIPT}"
|
||||||
ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
|
- cd tests && make create-${CI_PLATFORM} -s ; cd -
|
||||||
${LOG_LEVEL}
|
|
||||||
-e cloud_image=${CLOUD_IMAGE}
|
|
||||||
-e cloud_region=${CLOUD_REGION}
|
|
||||||
-e gce_credentials_file=${HOME}/.ssh/gce.json
|
|
||||||
-e gce_project_id=${GCE_PROJECT_ID}
|
|
||||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
|
||||||
-e cloud_machine_type=${CLOUD_MACHINE_TYPE}
|
|
||||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
|
||||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
|
||||||
-e mode=${CLUSTER_MODE}
|
|
||||||
-e test_id=${TEST_ID}
|
|
||||||
|
|
||||||
# Check out latest tag if testing upgrade
|
# Check out latest tag if testing upgrade
|
||||||
# Uncomment when gitlab kargo repo has tags
|
# Uncomment when gitlab kubespray repo has tags
|
||||||
#- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
|
#- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
|
||||||
- test "${UPGRADE_TEST}" != "false" && git checkout 031cf565ec3ccd3ebbe80eeef3454c3780e5c598 && pip install ansible==2.2.0
|
- test "${UPGRADE_TEST}" != "false" && git checkout f7d52564aad2ff8e337634951beb4a881c0e8aa6
|
||||||
|
# Checkout the CI vars file so it is available
|
||||||
|
- test "${UPGRADE_TEST}" != "false" && git checkout "${CI_BUILD_REF}" tests/files/${CI_JOB_NAME}.yml
|
||||||
|
# Workaround https://github.com/kubernetes-incubator/kubespray/issues/2021
|
||||||
|
- 'sh -c "echo ignore_assert_errors: true | tee -a tests/files/${CI_JOB_NAME}.yml"'
|
||||||
|
|
||||||
|
|
||||||
# Create cluster
|
# Create cluster
|
||||||
- >
|
- >
|
||||||
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
|
ansible-playbook
|
||||||
|
-i ${ANSIBLE_INVENTORY}
|
||||||
|
-b --become-user=root
|
||||||
|
--private-key=${HOME}/.ssh/id_rsa
|
||||||
|
-u $SSH_USER
|
||||||
${SSH_ARGS}
|
${SSH_ARGS}
|
||||||
${LOG_LEVEL}
|
${LOG_LEVEL}
|
||||||
-e ansible_python_interpreter=${PYPATH}
|
-e @${CI_TEST_VARS}
|
||||||
-e ansible_ssh_user=${SSH_USER}
|
-e ansible_ssh_user=${SSH_USER}
|
||||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
|
||||||
-e cert_management=${CERT_MGMT:-script}
|
|
||||||
-e cloud_provider=gce
|
|
||||||
-e deploy_netchecker=true
|
|
||||||
-e download_localhost=${DOWNLOAD_LOCALHOST}
|
|
||||||
-e download_run_once=${DOWNLOAD_RUN_ONCE}
|
|
||||||
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
|
||||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
|
||||||
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
|
||||||
-e local_release_dir=${PWD}/downloads
|
-e local_release_dir=${PWD}/downloads
|
||||||
-e resolvconf_mode=${RESOLVCONF_MODE}
|
|
||||||
-e vault_deployment_type=${VAULT_DEPLOYMENT}
|
|
||||||
--limit "all:!fake_hosts"
|
--limit "all:!fake_hosts"
|
||||||
cluster.yml
|
cluster.yml
|
||||||
|
|
||||||
# Repeat deployment if testing upgrade
|
# Repeat deployment if testing upgrade
|
||||||
- >
|
- >
|
||||||
if [ "${UPGRADE_TEST}" != "false" ]; then
|
if [ "${UPGRADE_TEST}" != "false" ]; then
|
||||||
test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
|
test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
|
||||||
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
|
test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
|
||||||
pip install ansible==2.2.1.0;
|
git checkout "${CI_BUILD_REF}";
|
||||||
git checkout "${CI_BUILD_REF}";
|
ansible-playbook
|
||||||
ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
|
-i ${ANSIBLE_INVENTORY}
|
||||||
${SSH_ARGS}
|
-b --become-user=root
|
||||||
${LOG_LEVEL}
|
--private-key=${HOME}/.ssh/id_rsa
|
||||||
-e ansible_python_interpreter=${PYPATH}
|
-u $SSH_USER
|
||||||
-e ansible_ssh_user=${SSH_USER}
|
${SSH_ARGS}
|
||||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
${LOG_LEVEL}
|
||||||
-e cloud_provider=gce
|
-e @${CI_TEST_VARS}
|
||||||
-e deploy_netchecker=true
|
-e ansible_ssh_user=${SSH_USER}
|
||||||
-e download_localhost=${DOWNLOAD_LOCALHOST}
|
-e local_release_dir=${PWD}/downloads
|
||||||
-e download_run_once=${DOWNLOAD_RUN_ONCE}
|
--limit "all:!fake_hosts"
|
||||||
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
$PLAYBOOK;
|
||||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
|
||||||
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
|
||||||
-e local_release_dir=${PWD}/downloads
|
|
||||||
-e resolvconf_mode=${RESOLVCONF_MODE}
|
|
||||||
-e weave_cpu_requests=${WEAVE_CPU_LIMIT}
|
|
||||||
-e weave_cpu_limit=${WEAVE_CPU_LIMIT}
|
|
||||||
--limit "all:!fake_hosts"
|
|
||||||
$PLAYBOOK;
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Tests Cases
|
# Tests Cases
|
||||||
## Test Master API
|
## Test Master API
|
||||||
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
- >
|
||||||
|
ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
||||||
|
-e "{kubeadm_enabled: ${KUBEADM_ENABLED}}"
|
||||||
|
|
||||||
## Ping the between 2 pod
|
## Ping the between 2 pod
|
||||||
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
|
- ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL
|
||||||
|
|
||||||
## Advanced DNS checks
|
## Advanced DNS checks
|
||||||
- ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
|
- ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL
|
||||||
|
|
||||||
## Idempotency checks 1/5 (repeat deployment)
|
## Idempotency checks 1/5 (repeat deployment)
|
||||||
- >
|
- >
|
||||||
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
||||||
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
ansible-playbook
|
||||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
-i ${ANSIBLE_INVENTORY}
|
||||||
--private-key=${HOME}/.ssh/id_rsa
|
-b --become-user=root
|
||||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
--private-key=${HOME}/.ssh/id_rsa
|
||||||
-e ansible_python_interpreter=${PYPATH}
|
-u $SSH_USER
|
||||||
-e download_localhost=${DOWNLOAD_LOCALHOST}
|
${SSH_ARGS}
|
||||||
-e download_run_once=${DOWNLOAD_RUN_ONCE}
|
${LOG_LEVEL}
|
||||||
-e deploy_netchecker=true
|
-e @${CI_TEST_VARS}
|
||||||
-e resolvconf_mode=${RESOLVCONF_MODE}
|
-e ansible_python_interpreter=${PYPATH}
|
||||||
-e local_release_dir=${PWD}/downloads
|
-e local_release_dir=${PWD}/downloads
|
||||||
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
--limit "all:!fake_hosts"
|
||||||
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
|
||||||
--limit "all:!fake_hosts"
|
|
||||||
cluster.yml;
|
cluster.yml;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
## Idempotency checks 2/5 (Advanced DNS checks)
|
## Idempotency checks 2/5 (Advanced DNS checks)
|
||||||
- >
|
- >
|
||||||
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
if [ "${IDEMPOT_CHECK}" = "true" ]; then
|
||||||
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
|
ansible-playbook
|
||||||
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
|
-i ${ANSIBLE_INVENTORY}
|
||||||
--limit "all:!fake_hosts"
|
-b --become-user=root
|
||||||
|
--private-key=${HOME}/.ssh/id_rsa
|
||||||
|
-u $SSH_USER
|
||||||
|
${SSH_ARGS}
|
||||||
|
${LOG_LEVEL}
|
||||||
|
-e @${CI_TEST_VARS}
|
||||||
|
--limit "all:!fake_hosts"
|
||||||
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
|
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
## Idempotency checks 3/5 (reset deployment)
|
## Idempotency checks 3/5 (reset deployment)
|
||||||
- >
|
- >
|
||||||
if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
|
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
|
||||||
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
ansible-playbook
|
||||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
-i ${ANSIBLE_INVENTORY}
|
||||||
--private-key=${HOME}/.ssh/id_rsa
|
-b --become-user=root
|
||||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
--private-key=${HOME}/.ssh/id_rsa
|
||||||
-e ansible_python_interpreter=${PYPATH}
|
-u $SSH_USER
|
||||||
-e reset_confirmation=yes
|
${SSH_ARGS}
|
||||||
|
${LOG_LEVEL}
|
||||||
|
-e @${CI_TEST_VARS}
|
||||||
|
-e ansible_python_interpreter=${PYPATH}
|
||||||
|
-e reset_confirmation=yes
|
||||||
--limit "all:!fake_hosts"
|
--limit "all:!fake_hosts"
|
||||||
reset.yml;
|
reset.yml;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
## Idempotency checks 4/5 (redeploy after reset)
|
## Idempotency checks 4/5 (redeploy after reset)
|
||||||
- >
|
- >
|
||||||
if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
|
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
|
||||||
ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
ansible-playbook
|
||||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
-i ${ANSIBLE_INVENTORY}
|
||||||
--private-key=${HOME}/.ssh/id_rsa
|
-b --become-user=root
|
||||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
--private-key=${HOME}/.ssh/id_rsa
|
||||||
-e ansible_python_interpreter=${PYPATH}
|
-u $SSH_USER
|
||||||
-e download_localhost=${DOWNLOAD_LOCALHOST}
|
${SSH_ARGS}
|
||||||
-e download_run_once=${DOWNLOAD_RUN_ONCE}
|
${LOG_LEVEL}
|
||||||
-e deploy_netchecker=true
|
-e @${CI_TEST_VARS}
|
||||||
-e resolvconf_mode=${RESOLVCONF_MODE}
|
-e ansible_python_interpreter=${PYPATH}
|
||||||
-e local_release_dir=${PWD}/downloads
|
-e local_release_dir=${PWD}/downloads
|
||||||
-e etcd_deployment_type=${ETCD_DEPLOYMENT}
|
--limit "all:!fake_hosts"
|
||||||
-e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
|
|
||||||
--limit "all:!fake_hosts"
|
|
||||||
cluster.yml;
|
cluster.yml;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
## Idempotency checks 5/5 (Advanced DNS checks)
|
## Idempotency checks 5/5 (Advanced DNS checks)
|
||||||
- >
|
- >
|
||||||
if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
|
if [ "${IDEMPOT_CHECK}" = "true" -a "${RESET_CHECK}" = "true" ]; then
|
||||||
ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
|
ansible-playbook -i ${ANSIBLE_INVENTORY} -e ansible_python_interpreter=${PYPATH}
|
||||||
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
|
-u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
|
||||||
--limit "all:!fake_hosts"
|
--limit "all:!fake_hosts"
|
||||||
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
|
tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
|
||||||
fi
|
fi
|
||||||
|
|
||||||
after_script:
|
after_script:
|
||||||
- >
|
- cd tests && make delete-${CI_PLATFORM} -s ; cd -
|
||||||
ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
|
|
||||||
-e mode=${CLUSTER_MODE}
|
.gce: &gce
|
||||||
-e test_id=${TEST_ID}
|
<<: *testcases
|
||||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
variables:
|
||||||
-e gce_project_id=${GCE_PROJECT_ID}
|
<<: *gce_variables
|
||||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
|
||||||
-e gce_credentials_file=${HOME}/.ssh/gce.json
|
.do: &do
|
||||||
-e cloud_image=${CLOUD_IMAGE}
|
variables:
|
||||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
<<: *do_variables
|
||||||
-e cloud_region=${CLOUD_REGION}
|
<<: *testcases
|
||||||
|
|
||||||
# Test matrix. Leave the comments for markup scripts.
|
# Test matrix. Leave the comments for markup scripts.
|
||||||
.coreos_calico_sep_variables: &coreos_calico_sep_variables
|
.coreos_calico_aio_variables: &coreos_calico_aio_variables
|
||||||
# stage: deploy-gce-part1
|
# stage: deploy-part1
|
||||||
KUBE_NETWORK_PLUGIN: calico
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: coreos-stable-1298-6-0-v20170315
|
|
||||||
CLOUD_REGION: us-west1-b
|
|
||||||
CLUSTER_MODE: separate
|
|
||||||
BOOTSTRAP_OS: coreos
|
|
||||||
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
|
|
||||||
|
|
||||||
.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
|
.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
|
||||||
# stage: deploy-gce-part1
|
# stage: deploy-part1
|
||||||
KUBE_NETWORK_PLUGIN: canal
|
|
||||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
|
||||||
CLOUD_REGION: europe-west1-b
|
|
||||||
CLOUD_MACHINE_TYPE: "n1-standard-2"
|
|
||||||
UPGRADE_TEST: "basic"
|
|
||||||
CLUSTER_MODE: ha
|
|
||||||
UPGRADE_TEST: "graceful"
|
UPGRADE_TEST: "graceful"
|
||||||
|
|
||||||
.rhel7_weave_variables: &rhel7_weave_variables
|
.centos_weave_kubeadm_variables: ¢os_weave_kubeadm_variables
|
||||||
# stage: deploy-gce-part1
|
# stage: deploy-part1
|
||||||
KUBE_NETWORK_PLUGIN: weave
|
UPGRADE_TEST: "graceful"
|
||||||
CLOUD_IMAGE: rhel-7
|
|
||||||
CLOUD_REGION: europe-west1-b
|
|
||||||
CLUSTER_MODE: default
|
|
||||||
|
|
||||||
.centos7_flannel_variables: ¢os7_flannel_variables
|
.ubuntu_canal_kubeadm_variables: &ubuntu_canal_kubeadm_variables
|
||||||
# stage: deploy-gce-part2
|
# stage: deploy-part1
|
||||||
KUBE_NETWORK_PLUGIN: flannel
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: centos-7
|
|
||||||
CLOUD_REGION: us-west1-a
|
.ubuntu_contiv_sep_variables: &ubuntu_contiv_sep_variables
|
||||||
CLUSTER_MODE: default
|
# stage: deploy-special
|
||||||
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
|
|
||||||
|
.coreos_cilium_variables: &coreos_cilium_variables
|
||||||
|
# stage: deploy-special
|
||||||
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
|
|
||||||
|
.ubuntu_cilium_sep_variables: &ubuntu_cilium_sep_variables
|
||||||
|
# stage: deploy-special
|
||||||
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
|
|
||||||
|
.rhel7_weave_variables: &rhel7_weave_variables
|
||||||
|
# stage: deploy-part1
|
||||||
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
|
|
||||||
|
.centos7_flannel_addons_variables: ¢os7_flannel_addons_variables
|
||||||
|
# stage: deploy-part2
|
||||||
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
|
|
||||||
.debian8_calico_variables: &debian8_calico_variables
|
.debian8_calico_variables: &debian8_calico_variables
|
||||||
# stage: deploy-gce-part2
|
# stage: deploy-part2
|
||||||
KUBE_NETWORK_PLUGIN: calico
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: debian-8-kubespray
|
|
||||||
CLOUD_REGION: us-central1-b
|
|
||||||
CLUSTER_MODE: default
|
|
||||||
|
|
||||||
.coreos_canal_variables: &coreos_canal_variables
|
.coreos_canal_variables: &coreos_canal_variables
|
||||||
# stage: deploy-gce-part2
|
# stage: deploy-part2
|
||||||
KUBE_NETWORK_PLUGIN: canal
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: coreos-stable-1298-6-0-v20170315
|
|
||||||
CLOUD_REGION: us-east1-b
|
|
||||||
CLUSTER_MODE: default
|
|
||||||
BOOTSTRAP_OS: coreos
|
|
||||||
IDEMPOT_CHECK: "true"
|
|
||||||
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
|
|
||||||
|
|
||||||
.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
|
.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
|
||||||
# stage: deploy-gce-special
|
# stage: deploy-special
|
||||||
KUBE_NETWORK_PLUGIN: canal
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: rhel-7
|
|
||||||
CLOUD_REGION: us-east1-b
|
|
||||||
CLUSTER_MODE: separate
|
|
||||||
|
|
||||||
.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
|
.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
|
||||||
# stage: deploy-gce-special
|
# stage: deploy-special
|
||||||
KUBE_NETWORK_PLUGIN: weave
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
|
||||||
CLOUD_REGION: us-central1-b
|
|
||||||
CLUSTER_MODE: separate
|
|
||||||
IDEMPOT_CHECK: "false"
|
|
||||||
|
|
||||||
.centos7_calico_ha_variables: ¢os7_calico_ha_variables
|
.centos7_calico_ha_variables: ¢os7_calico_ha_variables
|
||||||
# stage: deploy-gce-special
|
# stage: deploy-special
|
||||||
KUBE_NETWORK_PLUGIN: calico
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
DOWNLOAD_LOCALHOST: "true"
|
|
||||||
DOWNLOAD_RUN_ONCE: "true"
|
|
||||||
CLOUD_IMAGE: centos-7
|
|
||||||
CLOUD_REGION: europe-west1-b
|
|
||||||
CLUSTER_MODE: ha-scale
|
|
||||||
IDEMPOT_CHECK: "true"
|
|
||||||
|
|
||||||
.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
|
.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
|
||||||
# stage: deploy-gce-special
|
# stage: deploy-special
|
||||||
KUBE_NETWORK_PLUGIN: weave
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: coreos-alpha-1325-0-0-v20170216
|
|
||||||
CLOUD_REGION: us-west1-a
|
|
||||||
CLUSTER_MODE: ha-scale
|
|
||||||
BOOTSTRAP_OS: coreos
|
|
||||||
RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12
|
|
||||||
|
|
||||||
.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
|
.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
|
||||||
# stage: deploy-gce-part1
|
# stage: deploy-part1
|
||||||
KUBE_NETWORK_PLUGIN: flannel
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
|
||||||
CLOUD_REGION: us-central1-b
|
|
||||||
CLUSTER_MODE: separate
|
|
||||||
ETCD_DEPLOYMENT: rkt
|
|
||||||
KUBELET_DEPLOYMENT: rkt
|
|
||||||
|
|
||||||
.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
|
.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
|
||||||
# stage: deploy-gce-part1
|
# stage: deploy-part1
|
||||||
KUBE_NETWORK_PLUGIN: canal
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
CERT_MGMT: vault
|
|
||||||
CLOUD_IMAGE: ubuntu-1604-xenial
|
.ubuntu_flannel_variables: &ubuntu_flannel_variables
|
||||||
CLOUD_REGION: us-central1-b
|
# stage: deploy-special
|
||||||
CLUSTER_MODE: separate
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
|
|
||||||
|
.opensuse_canal_variables: &opensuse_canal_variables
|
||||||
|
# stage: deploy-part2
|
||||||
|
MOVED_TO_GROUP_VARS: "true"
|
||||||
|
|
||||||
|
|
||||||
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
|
# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
|
||||||
coreos-calico-sep:
|
### PR JOBS PART1
|
||||||
stage: deploy-gce-part1
|
gce_coreos-calico-aio:
|
||||||
|
stage: deploy-part1
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
|
<<: *coreos_calico_aio_variables
|
||||||
<<: *gce_variables
|
<<: *gce_variables
|
||||||
<<: *coreos_calico_sep_variables
|
|
||||||
when: on_success
|
when: on_success
|
||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
only: [/^pr-.*$/]
|
only: [/^pr-.*$/]
|
||||||
|
|
||||||
coreos-calico-sep-triggers:
|
### PR JOBS PART2
|
||||||
stage: deploy-gce-part1
|
gce_centos7-flannel-addons:
|
||||||
|
stage: deploy-part2
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
<<: *gce_variables
|
<<: *gce_variables
|
||||||
<<: *coreos_calico_sep_variables
|
<<: *centos7_flannel_addons_variables
|
||||||
when: on_success
|
|
||||||
only: ['triggers']
|
|
||||||
|
|
||||||
centos7-flannel:
|
|
||||||
stage: deploy-gce-part2
|
|
||||||
<<: *job
|
|
||||||
<<: *gce
|
|
||||||
variables:
|
|
||||||
<<: *gce_variables
|
|
||||||
<<: *centos7_flannel_variables
|
|
||||||
when: on_success
|
when: on_success
|
||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
only: [/^pr-.*$/]
|
only: [/^pr-.*$/]
|
||||||
|
|
||||||
centos7-flannel-triggers:
|
gce_ubuntu-weave-sep:
|
||||||
stage: deploy-gce-part1
|
stage: deploy-part2
|
||||||
<<: *job
|
|
||||||
<<: *gce
|
|
||||||
variables:
|
|
||||||
<<: *gce_variables
|
|
||||||
<<: *centos7_flannel_variables
|
|
||||||
when: on_success
|
|
||||||
only: ['triggers']
|
|
||||||
|
|
||||||
ubuntu-weave-sep:
|
|
||||||
stage: deploy-gce-special
|
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -408,8 +349,40 @@ ubuntu-weave-sep:
|
|||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
only: [/^pr-.*$/]
|
only: [/^pr-.*$/]
|
||||||
|
|
||||||
ubuntu-weave-sep-triggers:
|
### MANUAL JOBS
|
||||||
stage: deploy-gce-part1
|
gce_coreos-calico-sep-triggers:
|
||||||
|
stage: deploy-part2
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *coreos_calico_aio_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
|
||||||
|
gce_ubuntu-canal-ha-triggers:
|
||||||
|
stage: deploy-part2
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *ubuntu_canal_ha_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
|
||||||
|
gce_centos7-flannel-addons-triggers:
|
||||||
|
stage: deploy-part2
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *centos7_flannel_addons_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
|
||||||
|
|
||||||
|
gce_ubuntu-weave-sep-triggers:
|
||||||
|
stage: deploy-part2
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -419,8 +392,18 @@ ubuntu-weave-sep-triggers:
|
|||||||
only: ['triggers']
|
only: ['triggers']
|
||||||
|
|
||||||
# More builds for PRs/merges (manual) and triggers (auto)
|
# More builds for PRs/merges (manual) and triggers (auto)
|
||||||
ubuntu-canal-ha:
|
do_ubuntu-canal-ha:
|
||||||
stage: deploy-gce-part1
|
stage: deploy-part2
|
||||||
|
<<: *job
|
||||||
|
<<: *do
|
||||||
|
variables:
|
||||||
|
<<: *do_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
gce_ubuntu-canal-ha:
|
||||||
|
stage: deploy-part2
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -430,18 +413,83 @@ ubuntu-canal-ha:
|
|||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
only: ['master', /^pr-.*$/]
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
ubuntu-canal-ha-triggers:
|
gce_ubuntu-canal-kubeadm:
|
||||||
stage: deploy-gce-part1
|
stage: deploy-part2
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
<<: *gce_variables
|
<<: *gce_variables
|
||||||
<<: *ubuntu_canal_ha_variables
|
<<: *ubuntu_canal_kubeadm_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
gce_ubuntu-canal-kubeadm-triggers:
|
||||||
|
stage: deploy-part2
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *ubuntu_canal_kubeadm_variables
|
||||||
when: on_success
|
when: on_success
|
||||||
only: ['triggers']
|
only: ['triggers']
|
||||||
|
|
||||||
rhel7-weave:
|
gce_centos-weave-kubeadm:
|
||||||
stage: deploy-gce-part1
|
stage: deploy-part2
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *centos_weave_kubeadm_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
gce_centos-weave-kubeadm-triggers:
|
||||||
|
stage: deploy-part2
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *centos_weave_kubeadm_variables
|
||||||
|
when: on_success
|
||||||
|
only: ['triggers']
|
||||||
|
|
||||||
|
gce_ubuntu-contiv-sep:
|
||||||
|
stage: deploy-special
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *ubuntu_contiv_sep_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
gce_coreos-cilium:
|
||||||
|
stage: deploy-special
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *coreos_cilium_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
gce_ubuntu-cilium-sep:
|
||||||
|
stage: deploy-special
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *ubuntu_cilium_sep_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
gce_rhel7-weave:
|
||||||
|
stage: deploy-part2
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -451,8 +499,8 @@ rhel7-weave:
|
|||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
only: ['master', /^pr-.*$/]
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
rhel7-weave-triggers:
|
gce_rhel7-weave-triggers:
|
||||||
stage: deploy-gce-part1
|
stage: deploy-part2
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -461,8 +509,8 @@ rhel7-weave-triggers:
|
|||||||
when: on_success
|
when: on_success
|
||||||
only: ['triggers']
|
only: ['triggers']
|
||||||
|
|
||||||
debian8-calico-upgrade:
|
gce_debian8-calico-upgrade:
|
||||||
stage: deploy-gce-part2
|
stage: deploy-part2
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -472,8 +520,8 @@ debian8-calico-upgrade:
|
|||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
only: ['master', /^pr-.*$/]
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
debian8-calico-triggers:
|
gce_debian8-calico-triggers:
|
||||||
stage: deploy-gce-part1
|
stage: deploy-part2
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -482,8 +530,8 @@ debian8-calico-triggers:
|
|||||||
when: on_success
|
when: on_success
|
||||||
only: ['triggers']
|
only: ['triggers']
|
||||||
|
|
||||||
coreos-canal:
|
gce_coreos-canal:
|
||||||
stage: deploy-gce-part2
|
stage: deploy-part2
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -493,8 +541,8 @@ coreos-canal:
|
|||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
only: ['master', /^pr-.*$/]
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
coreos-canal-triggers:
|
gce_coreos-canal-triggers:
|
||||||
stage: deploy-gce-part1
|
stage: deploy-part2
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -503,8 +551,8 @@ coreos-canal-triggers:
|
|||||||
when: on_success
|
when: on_success
|
||||||
only: ['triggers']
|
only: ['triggers']
|
||||||
|
|
||||||
rhel7-canal-sep:
|
gce_rhel7-canal-sep:
|
||||||
stage: deploy-gce-special
|
stage: deploy-special
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -514,8 +562,8 @@ rhel7-canal-sep:
|
|||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
only: ['master', /^pr-.*$/,]
|
only: ['master', /^pr-.*$/,]
|
||||||
|
|
||||||
rhel7-canal-sep-triggers:
|
gce_rhel7-canal-sep-triggers:
|
||||||
stage: deploy-gce-part1
|
stage: deploy-part2
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -524,8 +572,8 @@ rhel7-canal-sep-triggers:
|
|||||||
when: on_success
|
when: on_success
|
||||||
only: ['triggers']
|
only: ['triggers']
|
||||||
|
|
||||||
centos7-calico-ha:
|
gce_centos7-calico-ha:
|
||||||
stage: deploy-gce-special
|
stage: deploy-special
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -535,8 +583,8 @@ centos7-calico-ha:
|
|||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
only: ['master', /^pr-.*$/]
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
centos7-calico-ha-triggers:
|
gce_centos7-calico-ha-triggers:
|
||||||
stage: deploy-gce-part1
|
stage: deploy-part2
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -545,9 +593,20 @@ centos7-calico-ha-triggers:
|
|||||||
when: on_success
|
when: on_success
|
||||||
only: ['triggers']
|
only: ['triggers']
|
||||||
|
|
||||||
|
gce_opensuse-canal:
|
||||||
|
stage: deploy-part2
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *opensuse_canal_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
|
# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
|
||||||
coreos-alpha-weave-ha:
|
gce_coreos-alpha-weave-ha:
|
||||||
stage: deploy-gce-special
|
stage: deploy-special
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -557,8 +616,8 @@ coreos-alpha-weave-ha:
|
|||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
only: ['master', /^pr-.*$/]
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
ubuntu-rkt-sep:
|
gce_ubuntu-rkt-sep:
|
||||||
stage: deploy-gce-part1
|
stage: deploy-part2
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -568,8 +627,8 @@ ubuntu-rkt-sep:
|
|||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
only: ['master', /^pr-.*$/]
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
ubuntu-vault-sep:
|
gce_ubuntu-vault-sep:
|
||||||
stage: deploy-gce-part1
|
stage: deploy-part2
|
||||||
<<: *job
|
<<: *job
|
||||||
<<: *gce
|
<<: *gce
|
||||||
variables:
|
variables:
|
||||||
@@ -579,6 +638,17 @@ ubuntu-vault-sep:
|
|||||||
except: ['triggers']
|
except: ['triggers']
|
||||||
only: ['master', /^pr-.*$/]
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
|
gce_ubuntu-flannel-sep:
|
||||||
|
stage: deploy-special
|
||||||
|
<<: *job
|
||||||
|
<<: *gce
|
||||||
|
variables:
|
||||||
|
<<: *gce_variables
|
||||||
|
<<: *ubuntu_flannel_variables
|
||||||
|
when: manual
|
||||||
|
except: ['triggers']
|
||||||
|
only: ['master', /^pr-.*$/]
|
||||||
|
|
||||||
# Premoderated with manual actions
|
# Premoderated with manual actions
|
||||||
ci-authorized:
|
ci-authorized:
|
||||||
<<: *job
|
<<: *job
|
||||||
@@ -588,7 +658,7 @@ ci-authorized:
|
|||||||
script:
|
script:
|
||||||
- /bin/sh scripts/premoderator.sh
|
- /bin/sh scripts/premoderator.sh
|
||||||
except: ['triggers', 'master']
|
except: ['triggers', 'master']
|
||||||
|
|
||||||
syntax-check:
|
syntax-check:
|
||||||
<<: *job
|
<<: *job
|
||||||
stage: unit-tests
|
stage: unit-tests
|
||||||
@@ -596,6 +666,14 @@ syntax-check:
|
|||||||
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
|
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
|
||||||
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root upgrade-cluster.yml -vvv --syntax-check
|
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root upgrade-cluster.yml -vvv --syntax-check
|
||||||
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root reset.yml -vvv --syntax-check
|
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root reset.yml -vvv --syntax-check
|
||||||
|
- ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root extra_playbooks/upgrade-only-k8s.yml -vvv --syntax-check
|
||||||
|
except: ['triggers', 'master']
|
||||||
|
|
||||||
|
yamllint:
|
||||||
|
<<: *job
|
||||||
|
stage: unit-tests
|
||||||
|
script:
|
||||||
|
- yamllint roles
|
||||||
except: ['triggers', 'master']
|
except: ['triggers', 'master']
|
||||||
|
|
||||||
tox-inventory-builder:
|
tox-inventory-builder:
|
||||||
|
|||||||
161
.travis.yml.bak
161
.travis.yml.bak
@@ -1,161 +0,0 @@
|
|||||||
sudo: required
|
|
||||||
|
|
||||||
services:
|
|
||||||
- docker
|
|
||||||
|
|
||||||
git:
|
|
||||||
depth: 5
|
|
||||||
|
|
||||||
env:
|
|
||||||
global:
|
|
||||||
GCE_USER=travis
|
|
||||||
SSH_USER=$GCE_USER
|
|
||||||
TEST_ID=$TRAVIS_JOB_NUMBER
|
|
||||||
CONTAINER_ENGINE=docker
|
|
||||||
PRIVATE_KEY=$GCE_PRIVATE_KEY
|
|
||||||
GS_ACCESS_KEY_ID=$GS_KEY
|
|
||||||
GS_SECRET_ACCESS_KEY=$GS_SECRET
|
|
||||||
ANSIBLE_KEEP_REMOTE_FILES=1
|
|
||||||
CLUSTER_MODE=default
|
|
||||||
BOOTSTRAP_OS=none
|
|
||||||
matrix:
|
|
||||||
# Debian Jessie
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=canal
|
|
||||||
CLOUD_IMAGE=debian-8-kubespray
|
|
||||||
CLOUD_REGION=asia-east1-a
|
|
||||||
CLUSTER_MODE=ha
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=calico
|
|
||||||
CLOUD_IMAGE=debian-8-kubespray
|
|
||||||
CLOUD_REGION=europe-west1-c
|
|
||||||
CLUSTER_MODE=default
|
|
||||||
|
|
||||||
# Centos 7
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=flannel
|
|
||||||
CLOUD_IMAGE=centos-7
|
|
||||||
CLOUD_REGION=asia-northeast1-c
|
|
||||||
CLUSTER_MODE=default
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=calico
|
|
||||||
CLOUD_IMAGE=centos-7
|
|
||||||
CLOUD_REGION=us-central1-b
|
|
||||||
CLUSTER_MODE=ha
|
|
||||||
|
|
||||||
# Redhat 7
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=weave
|
|
||||||
CLOUD_IMAGE=rhel-7
|
|
||||||
CLOUD_REGION=us-east1-c
|
|
||||||
CLUSTER_MODE=default
|
|
||||||
|
|
||||||
# CoreOS stable
|
|
||||||
#- >-
|
|
||||||
# KUBE_NETWORK_PLUGIN=weave
|
|
||||||
# CLOUD_IMAGE=coreos-stable
|
|
||||||
# CLOUD_REGION=europe-west1-b
|
|
||||||
# CLUSTER_MODE=ha
|
|
||||||
# BOOTSTRAP_OS=coreos
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=canal
|
|
||||||
CLOUD_IMAGE=coreos-stable
|
|
||||||
CLOUD_REGION=us-west1-b
|
|
||||||
CLUSTER_MODE=default
|
|
||||||
BOOTSTRAP_OS=coreos
|
|
||||||
|
|
||||||
# Extra cases for separated roles
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=canal
|
|
||||||
CLOUD_IMAGE=rhel-7
|
|
||||||
CLOUD_REGION=asia-northeast1-b
|
|
||||||
CLUSTER_MODE=separate
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=weave
|
|
||||||
CLOUD_IMAGE=ubuntu-1604-xenial
|
|
||||||
CLOUD_REGION=europe-west1-d
|
|
||||||
CLUSTER_MODE=separate
|
|
||||||
- >-
|
|
||||||
KUBE_NETWORK_PLUGIN=calico
|
|
||||||
CLOUD_IMAGE=coreos-stable
|
|
||||||
CLOUD_REGION=us-central1-f
|
|
||||||
CLUSTER_MODE=separate
|
|
||||||
BOOTSTRAP_OS=coreos
|
|
||||||
|
|
||||||
matrix:
|
|
||||||
allow_failures:
|
|
||||||
- env: KUBE_NETWORK_PLUGIN=weave CLOUD_IMAGE=coreos-stable CLOUD_REGION=europe-west1-b CLUSTER_MODE=ha BOOTSTRAP_OS=coreos
|
|
||||||
|
|
||||||
before_install:
|
|
||||||
# Install Ansible.
|
|
||||||
- pip install --user ansible
|
|
||||||
- pip install --user netaddr
|
|
||||||
# W/A https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
|
|
||||||
- pip install --user apache-libcloud==0.20.1
|
|
||||||
- pip install --user boto==2.9.0 -U
|
|
||||||
# Load cached docker images
|
|
||||||
- if [ -d /var/tmp/releases ]; then find /var/tmp/releases -type f -name "*.tar" | xargs -I {} sh -c "zcat {} | docker load"; fi
|
|
||||||
|
|
||||||
cache:
|
|
||||||
- directories:
|
|
||||||
- $HOME/.cache/pip
|
|
||||||
- $HOME/.local
|
|
||||||
- /var/tmp/releases
|
|
||||||
|
|
||||||
before_script:
|
|
||||||
- echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
|
|
||||||
- mkdir -p $HOME/.ssh
|
|
||||||
- echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
|
|
||||||
- echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
|
|
||||||
- chmod 400 $HOME/.ssh/id_rsa
|
|
||||||
- chmod 755 $HOME/.local/bin/ansible-playbook
|
|
||||||
- $HOME/.local/bin/ansible-playbook --version
|
|
||||||
- cp tests/ansible.cfg .
|
|
||||||
- export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
|
|
||||||
# - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
|
|
||||||
|
|
||||||
script:
|
|
||||||
- >
|
|
||||||
$HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
|
|
||||||
-e mode=${CLUSTER_MODE}
|
|
||||||
-e test_id=${TEST_ID}
|
|
||||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
|
||||||
-e gce_project_id=${GCE_PROJECT_ID}
|
|
||||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
|
||||||
-e gce_pem_file=${HOME}/.ssh/gce
|
|
||||||
-e cloud_image=${CLOUD_IMAGE}
|
|
||||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
|
||||||
-e cloud_region=${CLOUD_REGION}
|
|
||||||
|
|
||||||
# Create cluster with netchecker app deployed
|
|
||||||
- >
|
|
||||||
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
|
|
||||||
-b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
|
||||||
-e bootstrap_os=${BOOTSTRAP_OS}
|
|
||||||
-e ansible_python_interpreter=${PYPATH}
|
|
||||||
-e download_run_once=true
|
|
||||||
-e download_localhost=true
|
|
||||||
-e local_release_dir=/var/tmp/releases
|
|
||||||
-e deploy_netchecker=true
|
|
||||||
cluster.yml
|
|
||||||
|
|
||||||
# Tests Cases
|
|
||||||
## Test Master API
|
|
||||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
|
|
||||||
## Ping the between 2 pod
|
|
||||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
|
|
||||||
## Advanced DNS checks
|
|
||||||
- $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL
|
|
||||||
|
|
||||||
after_script:
|
|
||||||
- >
|
|
||||||
$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
|
|
||||||
-e mode=${CLUSTER_MODE}
|
|
||||||
-e test_id=${TEST_ID}
|
|
||||||
-e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
|
|
||||||
-e gce_project_id=${GCE_PROJECT_ID}
|
|
||||||
-e gce_service_account_email=${GCE_ACCOUNT}
|
|
||||||
-e gce_pem_file=${HOME}/.ssh/gce
|
|
||||||
-e cloud_image=${CLOUD_IMAGE}
|
|
||||||
-e inventory_path=${PWD}/inventory/inventory.ini
|
|
||||||
-e cloud_region=${CLOUD_REGION}
|
|
||||||
16
.yamllint
Normal file
16
.yamllint
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
---
|
||||||
|
extends: default
|
||||||
|
|
||||||
|
rules:
|
||||||
|
braces:
|
||||||
|
min-spaces-inside: 0
|
||||||
|
max-spaces-inside: 1
|
||||||
|
brackets:
|
||||||
|
min-spaces-inside: 0
|
||||||
|
max-spaces-inside: 1
|
||||||
|
indentation:
|
||||||
|
spaces: 2
|
||||||
|
indent-sequences: consistent
|
||||||
|
line-length: disable
|
||||||
|
new-line-at-end-of-file: disable
|
||||||
|
truthy: disable
|
||||||
16
Dockerfile
Normal file
16
Dockerfile
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
FROM ubuntu:16.04
|
||||||
|
|
||||||
|
RUN mkdir /kubespray
|
||||||
|
WORKDIR /kubespray
|
||||||
|
RUN apt update -y && \
|
||||||
|
apt install -y \
|
||||||
|
libssl-dev python-dev sshpass apt-transport-https \
|
||||||
|
ca-certificates curl gnupg2 software-properties-common python-pip
|
||||||
|
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
|
||||||
|
add-apt-repository \
|
||||||
|
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
|
||||||
|
$(lsb_release -cs) \
|
||||||
|
stable" \
|
||||||
|
&& apt update -y && apt-get install docker-ce -y
|
||||||
|
COPY . .
|
||||||
|
RUN /usr/bin/python -m pip install pip -U && /usr/bin/python -m pip install -r tests/requirements.txt && python -m pip install -r requirements.txt
|
||||||
189
README.md
189
README.md
@@ -1,115 +1,160 @@
|
|||||||

|

|
||||||
|
|
||||||
## Deploy a production ready kubernetes cluster
|
Deploy a Production Ready Kubernetes Cluster
|
||||||
|
============================================
|
||||||
|
|
||||||
If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kargo**.
|
If you have questions, join us on the [kubernetes slack](https://kubernetes.slack.com), channel **\#kubespray**.
|
||||||
|
|
||||||
- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
|
- Can be deployed on **AWS, GCE, Azure, OpenStack, vSphere or Baremetal**
|
||||||
- **High available** cluster
|
- **High available** cluster
|
||||||
- **Composable** (Choice of the network plugin for instance)
|
- **Composable** (Choice of the network plugin for instance)
|
||||||
- Support most popular **Linux distributions**
|
- Support most popular **Linux distributions**
|
||||||
- **Continuous integration tests**
|
- **Continuous integration tests**
|
||||||
|
|
||||||
|
Quick Start
|
||||||
|
-----------
|
||||||
|
|
||||||
To deploy the cluster you can use :
|
To deploy the cluster you can use :
|
||||||
|
|
||||||
[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br>
|
### Ansible
|
||||||
**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py) <br>
|
|
||||||
**vagrant** by simply running `vagrant up` (for tests purposes) <br>
|
|
||||||
|
|
||||||
|
# Copy ``inventory/sample`` as ``inventory/mycluster``
|
||||||
|
cp -rfp inventory/sample inventory/mycluster
|
||||||
|
|
||||||
* [Requirements](#requirements)
|
# Update Ansible inventory file with inventory builder
|
||||||
* [Kargo vs ...](docs/comparisons.md)
|
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
|
||||||
* [Getting started](docs/getting-started.md)
|
CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
|
||||||
* [Ansible inventory and tags](docs/ansible.md)
|
|
||||||
* [Deployment data variables](docs/vars.md)
|
|
||||||
* [DNS stack](docs/dns-stack.md)
|
|
||||||
* [HA mode](docs/ha-mode.md)
|
|
||||||
* [Network plugins](#network-plugins)
|
|
||||||
* [Vagrant install](docs/vagrant.md)
|
|
||||||
* [CoreOS bootstrap](docs/coreos.md)
|
|
||||||
* [Downloaded artifacts](docs/downloads.md)
|
|
||||||
* [Cloud providers](docs/cloud.md)
|
|
||||||
* [OpenStack](docs/openstack.md)
|
|
||||||
* [AWS](docs/aws.md)
|
|
||||||
* [Azure](docs/azure.md)
|
|
||||||
* [Large deployments](docs/large-deployments.md)
|
|
||||||
* [Upgrades basics](docs/upgrades.md)
|
|
||||||
* [Roadmap](docs/roadmap.md)
|
|
||||||
|
|
||||||
Supported Linux distributions
|
# Review and change parameters under ``inventory/mycluster/group_vars``
|
||||||
===============
|
cat inventory/mycluster/group_vars/all.yml
|
||||||
|
cat inventory/mycluster/group_vars/k8s-cluster.yml
|
||||||
|
|
||||||
* **Container Linux by CoreOS**
|
# Deploy Kubespray with Ansible Playbook
|
||||||
* **Debian** Jessie
|
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml
|
||||||
* **Ubuntu** 16.04
|
|
||||||
* **CentOS/RHEL** 7
|
### Vagrant
|
||||||
|
|
||||||
|
# Simply running `vagrant up` (for tests purposes)
|
||||||
|
vagrant up
|
||||||
|
|
||||||
|
Documents
|
||||||
|
---------
|
||||||
|
|
||||||
|
- [Requirements](#requirements)
|
||||||
|
- [Kubespray vs ...](docs/comparisons.md)
|
||||||
|
- [Getting started](docs/getting-started.md)
|
||||||
|
- [Ansible inventory and tags](docs/ansible.md)
|
||||||
|
- [Integration with existing ansible repo](docs/integration.md)
|
||||||
|
- [Deployment data variables](docs/vars.md)
|
||||||
|
- [DNS stack](docs/dns-stack.md)
|
||||||
|
- [HA mode](docs/ha-mode.md)
|
||||||
|
- [Network plugins](#network-plugins)
|
||||||
|
- [Vagrant install](docs/vagrant.md)
|
||||||
|
- [CoreOS bootstrap](docs/coreos.md)
|
||||||
|
- [Debian Jessie setup](docs/debian.md)
|
||||||
|
- [openSUSE setup](docs/opensuse.md)
|
||||||
|
- [Downloaded artifacts](docs/downloads.md)
|
||||||
|
- [Cloud providers](docs/cloud.md)
|
||||||
|
- [OpenStack](docs/openstack.md)
|
||||||
|
- [AWS](docs/aws.md)
|
||||||
|
- [Azure](docs/azure.md)
|
||||||
|
- [vSphere](docs/vsphere.md)
|
||||||
|
- [Large deployments](docs/large-deployments.md)
|
||||||
|
- [Upgrades basics](docs/upgrades.md)
|
||||||
|
- [Roadmap](docs/roadmap.md)
|
||||||
|
|
||||||
|
Supported Linux Distributions
|
||||||
|
-----------------------------
|
||||||
|
|
||||||
|
- **Container Linux by CoreOS**
|
||||||
|
- **Debian** Jessie, Stretch, Wheezy
|
||||||
|
- **Ubuntu** 16.04
|
||||||
|
- **CentOS/RHEL** 7
|
||||||
|
- **Fedora/CentOS** Atomic
|
||||||
|
- **openSUSE** Leap 42.3/Tumbleweed
|
||||||
|
|
||||||
Note: Upstart/SysV init based OS types are not supported.
|
Note: Upstart/SysV init based OS types are not supported.
|
||||||
|
|
||||||
Versions of supported components
|
Versions of supported components
|
||||||
--------------------------------
|
--------------------------------
|
||||||
|
|
||||||
[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.5.1 <br>
|
- [kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.9.5
|
||||||
[etcd](https://github.com/coreos/etcd/releases) v3.0.6 <br>
|
- [etcd](https://github.com/coreos/etcd/releases) v3.2.4
|
||||||
[flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
|
- [flanneld](https://github.com/coreos/flannel/releases) v0.10.0
|
||||||
[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>
|
- [calico](https://docs.projectcalico.org/v2.6/releases/) v2.6.8
|
||||||
[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
|
- [canal](https://github.com/projectcalico/canal) (given calico/flannel versions)
|
||||||
[weave](http://weave.works/) v1.8.2 <br>
|
- [cilium](https://github.com/cilium/cilium) v1.0.0-rc8
|
||||||
[docker](https://www.docker.com/) v1.12.5 <br>
|
- [contiv](https://github.com/contiv/install/releases) v1.1.7
|
||||||
[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 <br>
|
- [weave](http://weave.works/) v2.2.1
|
||||||
|
- [docker](https://www.docker.com/) v17.03 (see note)
|
||||||
|
- [rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 (see Note 2)
|
||||||
|
|
||||||
Note: rkt support as docker alternative is limited to control plane (etcd and
|
Note: kubernetes doesn't support newer docker versions. Among other things kubelet currently breaks on docker's non-standard version numbering (it no longer uses semantic versioning). To ensure auto-updates don't break your cluster look into e.g. yum versionlock plugin or apt pin).
|
||||||
|
|
||||||
|
Note 2: rkt support as docker alternative is limited to control plane (etcd and
|
||||||
kubelet). Docker is still used for Kubernetes cluster workloads and network
|
kubelet). Docker is still used for Kubernetes cluster workloads and network
|
||||||
plugins' related OS services. Also note, only one of the supported network
|
plugins' related OS services. Also note, only one of the supported network
|
||||||
plugins can be deployed for a given single cluster.
|
plugins can be deployed for a given single cluster.
|
||||||
|
|
||||||
Requirements
|
Requirements
|
||||||
--------------
|
------------
|
||||||
|
|
||||||
* **Ansible v2.2 (or newer) and python-netaddr is installed on the machine
|
- **Ansible v2.4 (or newer) and python-netaddr is installed on the machine
|
||||||
that will run Ansible commands**
|
that will run Ansible commands**
|
||||||
* **Jinja 2.8 (or newer) is required to run the Ansible Playbooks**
|
- **Jinja 2.9 (or newer) is required to run the Ansible Playbooks**
|
||||||
* The target servers must have **access to the Internet** in order to pull docker images.
|
- The target servers must have **access to the Internet** in order to pull docker images.
|
||||||
* The target servers are configured to allow **IPv4 forwarding**.
|
- The target servers are configured to allow **IPv4 forwarding**.
|
||||||
* **Your ssh key must be copied** to all the servers part of your inventory.
|
- **Your ssh key must be copied** to all the servers part of your inventory.
|
||||||
* The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
|
- The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
|
||||||
in order to avoid any issue during deployment you should disable your firewall.
|
in order to avoid any issue during deployment you should disable your firewall.
|
||||||
|
- If kubespray is ran from non-root user account, correct privilege escalation method
|
||||||
|
should be configured in the target servers. Then the `ansible_become` flag
|
||||||
|
or command parameters `--become or -b` should be specified.
|
||||||
|
|
||||||
|
Network Plugins
|
||||||
|
---------------
|
||||||
|
|
||||||
## Network plugins
|
You can choose between 6 network plugins. (default: `calico`, except Vagrant uses `flannel`)
|
||||||
You can choose between 4 network plugins. (default: `calico`)
|
|
||||||
|
|
||||||
* [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
|
- [flannel](docs/flannel.md): gre/vxlan (layer 2) networking.
|
||||||
|
|
||||||
* [**calico**](docs/calico.md): bgp (layer 3) networking.
|
- [calico](docs/calico.md): bgp (layer 3) networking.
|
||||||
|
|
||||||
* [**canal**](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
|
- [canal](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
|
||||||
|
|
||||||
* **weave**: Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
|
- [cilium](http://docs.cilium.io/en/latest/): layer 3/4 networking (as well as layer 7 to protect and secure application protocols), supports dynamic insertion of BPF bytecode into the Linux kernel to implement security services, networking and visibility logic.
|
||||||
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
|
|
||||||
|
- [contiv](docs/contiv.md): supports vlan, vxlan, bgp and Cisco SDN networking. This plugin is able to
|
||||||
|
apply firewall policies, segregate containers in multiple network and bridging pods onto physical networks.
|
||||||
|
|
||||||
|
- [weave](docs/weave.md): Weave is a lightweight container overlay network that doesn't require an external K/V database cluster.
|
||||||
|
(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
|
||||||
|
|
||||||
The choice is defined with the variable `kube_network_plugin`. There is also an
|
The choice is defined with the variable `kube_network_plugin`. There is also an
|
||||||
option to leverage built-in cloud provider networking instead.
|
option to leverage built-in cloud provider networking instead.
|
||||||
See also [Network checker](docs/netcheck.md).
|
See also [Network checker](docs/netcheck.md).
|
||||||
|
|
||||||
## Community docs and resources
|
Community docs and resources
|
||||||
- [kubernetes.io/docs/getting-started-guides/kargo/](https://kubernetes.io/docs/getting-started-guides/kargo/)
|
----------------------------
|
||||||
- [kargo, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
|
|
||||||
- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
|
|
||||||
- [Deploy a Kubernets Cluster with Kargo (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
|
|
||||||
|
|
||||||
## Tools and projects on top of Kargo
|
- [kubernetes.io/docs/getting-started-guides/kubespray/](https://kubernetes.io/docs/getting-started-guides/kubespray/)
|
||||||
- [Digital Rebar](https://github.com/digitalrebar/digitalrebar)
|
- [kubespray, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
|
||||||
- [Kargo-cli](https://github.com/kubespray/kargo-cli)
|
- [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
|
||||||
- [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
|
- [Deploy a Kubernetes Cluster with Kubespray (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
|
||||||
- [Terraform Contrib](https://github.com/kubernetes-incubator/kargo/tree/master/contrib/terraform)
|
|
||||||
|
|
||||||
## CI Tests
|
Tools and projects on top of Kubespray
|
||||||
|
--------------------------------------
|
||||||
|
|
||||||
|
- [Digital Rebar Provision](https://github.com/digitalrebar/provision/blob/master/doc/integrations/ansible.rst)
|
||||||
|
- [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
|
||||||
|
- [Terraform Contrib](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/terraform)
|
||||||
|
|
||||||
|
CI Tests
|
||||||
|
--------
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
[](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/pipelines) </br>
|
[](https://gitlab.com/kubespray-ci/kubernetes-incubator__kubespray/pipelines)
|
||||||
|
|
||||||
CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack).
|
CI/end-to-end tests sponsored by Google (GCE)
|
||||||
See the [test matrix](docs/test_cases.md) for details.
|
See the [test matrix](docs/test_cases.md) for details.
|
||||||
|
|||||||
20
RELEASE.md
20
RELEASE.md
@@ -1,16 +1,16 @@
|
|||||||
# Release Process
|
# Release Process
|
||||||
|
|
||||||
The Kargo Project is released on an as-needed basis. The process is as follows:
|
The Kubespray Project is released on an as-needed basis. The process is as follows:
|
||||||
|
|
||||||
1. An issue is proposing a new release with a changelog since the last release
|
1. An issue is proposing a new release with a changelog since the last release
|
||||||
2. At least on of the [OWNERS](OWNERS) must LGTM this release
|
2. At least one of the [OWNERS](OWNERS) must LGTM this release
|
||||||
3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
|
3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
|
||||||
4. The release issue is closed
|
4. The release issue is closed
|
||||||
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kargo $VERSION is released`
|
5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] Kubespray $VERSION is released`
|
||||||
|
|
||||||
## Major/minor releases, merge freezes and milestones
|
## Major/minor releases, merge freezes and milestones
|
||||||
|
|
||||||
* Kargo does not maintain stable branches for releases. Releases are tags, not
|
* Kubespray does not maintain stable branches for releases. Releases are tags, not
|
||||||
branches, and there are no backports. Therefore, there is no need for merge
|
branches, and there are no backports. Therefore, there is no need for merge
|
||||||
freezes as well.
|
freezes as well.
|
||||||
|
|
||||||
@@ -20,21 +20,21 @@ The Kargo Project is released on an as-needed basis. The process is as follows:
|
|||||||
support lifetime, which ends once the milestone closed. Then only a next major
|
support lifetime, which ends once the milestone closed. Then only a next major
|
||||||
or minor release can be done.
|
or minor release can be done.
|
||||||
|
|
||||||
* Kargo major and minor releases are bound to the given ``kube_version`` major/minor
|
* Kubespray major and minor releases are bound to the given ``kube_version`` major/minor
|
||||||
version numbers and other components' arbitrary versions, like etcd or network plugins.
|
version numbers and other components' arbitrary versions, like etcd or network plugins.
|
||||||
Older or newer versions are not supported and not tested for the given release.
|
Older or newer versions are not supported and not tested for the given release.
|
||||||
|
|
||||||
* There is no unstable releases and no APIs, thus Kargo doesn't follow
|
* There is no unstable releases and no APIs, thus Kubespray doesn't follow
|
||||||
[semver](http://semver.org/). Every version describes only a stable release.
|
[semver](http://semver.org/). Every version describes only a stable release.
|
||||||
Breaking changes, if any introduced by changed defaults or non-contrib ansible roles'
|
Breaking changes, if any introduced by changed defaults or non-contrib ansible roles'
|
||||||
playbooks, shall be described in the release notes. Other breaking changes, if any in
|
playbooks, shall be described in the release notes. Other breaking changes, if any in
|
||||||
the contributed addons or bound versions of Kubernetes and other components, are
|
the contributed addons or bound versions of Kubernetes and other components, are
|
||||||
considered out of Kargo scope and are up to the components' teams to deal with and
|
considered out of Kubespray scope and are up to the components' teams to deal with and
|
||||||
document.
|
document.
|
||||||
|
|
||||||
* Minor releases can change components' versions, but not the major ``kube_version``.
|
* Minor releases can change components' versions, but not the major ``kube_version``.
|
||||||
Greater ``kube_version`` requires a new major or minor release. For example, if Kargo v2.0.0
|
Greater ``kube_version`` requires a new major or minor release. For example, if Kubespray v2.0.0
|
||||||
is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``,
|
is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``,
|
||||||
then Kargo v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
|
then Kubespray v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1
|
||||||
and *any* changes to other components, like etcd v4, or calico 1.2.3.
|
and *any* changes to other components, like etcd v4, or calico 1.2.3.
|
||||||
And Kargo v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
|
And Kubespray v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
|
||||||
|
|||||||
77
Vagrantfile
vendored
77
Vagrantfile
vendored
@@ -3,26 +3,47 @@
|
|||||||
|
|
||||||
require 'fileutils'
|
require 'fileutils'
|
||||||
|
|
||||||
Vagrant.require_version ">= 1.8.0"
|
Vagrant.require_version ">= 2.0.0"
|
||||||
|
|
||||||
CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
|
CONFIG = File.join(File.dirname(__FILE__), "vagrant/config.rb")
|
||||||
|
|
||||||
|
COREOS_URL_TEMPLATE = "https://storage.googleapis.com/%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json"
|
||||||
|
|
||||||
|
# Uniq disk UUID for libvirt
|
||||||
|
DISK_UUID = Time.now.utc.to_i
|
||||||
|
|
||||||
|
SUPPORTED_OS = {
|
||||||
|
"coreos-stable" => {box: "coreos-stable", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["stable"]},
|
||||||
|
"coreos-alpha" => {box: "coreos-alpha", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["alpha"]},
|
||||||
|
"coreos-beta" => {box: "coreos-beta", bootstrap_os: "coreos", user: "core", box_url: COREOS_URL_TEMPLATE % ["beta"]},
|
||||||
|
"ubuntu" => {box: "bento/ubuntu-16.04", bootstrap_os: "ubuntu", user: "vagrant"},
|
||||||
|
"centos" => {box: "centos/7", bootstrap_os: "centos", user: "vagrant"},
|
||||||
|
"opensuse" => {box: "opensuse/openSUSE-42.3-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
|
||||||
|
"opensuse-tumbleweed" => {box: "opensuse/openSUSE-Tumbleweed-x86_64", bootstrap_os: "opensuse", use: "vagrant"},
|
||||||
|
}
|
||||||
|
|
||||||
# Defaults for config options defined in CONFIG
|
# Defaults for config options defined in CONFIG
|
||||||
$num_instances = 3
|
$num_instances = 3
|
||||||
$instance_name_prefix = "k8s"
|
$instance_name_prefix = "k8s"
|
||||||
$vm_gui = false
|
$vm_gui = false
|
||||||
$vm_memory = 1536
|
$vm_memory = 2048
|
||||||
$vm_cpus = 1
|
$vm_cpus = 1
|
||||||
$shared_folders = {}
|
$shared_folders = {}
|
||||||
$forwarded_ports = {}
|
$forwarded_ports = {}
|
||||||
$subnet = "172.17.8"
|
$subnet = "172.17.8"
|
||||||
$box = "bento/ubuntu-16.04"
|
$os = "ubuntu"
|
||||||
|
$network_plugin = "flannel"
|
||||||
# The first three nodes are etcd servers
|
# The first three nodes are etcd servers
|
||||||
$etcd_instances = $num_instances
|
$etcd_instances = $num_instances
|
||||||
# The first two nodes are masters
|
# The first two nodes are kube masters
|
||||||
$kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
|
$kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
|
||||||
# All nodes are kube nodes
|
# All nodes are kube nodes
|
||||||
$kube_node_instances = $num_instances
|
$kube_node_instances = $num_instances
|
||||||
|
# The following only works when using the libvirt provider
|
||||||
|
$kube_node_instances_with_disks = false
|
||||||
|
$kube_node_instances_with_disks_size = "20G"
|
||||||
|
$kube_node_instances_with_disks_number = 2
|
||||||
|
|
||||||
$local_release_dir = "/vagrant/temp"
|
$local_release_dir = "/vagrant/temp"
|
||||||
|
|
||||||
host_vars = {}
|
host_vars = {}
|
||||||
@@ -31,8 +52,9 @@ if File.exist?(CONFIG)
|
|||||||
require CONFIG
|
require CONFIG
|
||||||
end
|
end
|
||||||
|
|
||||||
|
$box = SUPPORTED_OS[$os][:box]
|
||||||
# if $inventory is not set, try to use example
|
# if $inventory is not set, try to use example
|
||||||
$inventory = File.join(File.dirname(__FILE__), "inventory") if ! $inventory
|
$inventory = File.join(File.dirname(__FILE__), "inventory", "sample") if ! $inventory
|
||||||
|
|
||||||
# if $inventory has a hosts file use it, otherwise copy over vars etc
|
# if $inventory has a hosts file use it, otherwise copy over vars etc
|
||||||
# to where vagrant expects dynamic inventory to be.
|
# to where vagrant expects dynamic inventory to be.
|
||||||
@@ -41,7 +63,7 @@ if ! File.exist?(File.join(File.dirname($inventory), "hosts"))
|
|||||||
"provisioners", "ansible")
|
"provisioners", "ansible")
|
||||||
FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
|
FileUtils.mkdir_p($vagrant_ansible) if ! File.exist?($vagrant_ansible)
|
||||||
if ! File.exist?(File.join($vagrant_ansible,"inventory"))
|
if ! File.exist?(File.join($vagrant_ansible,"inventory"))
|
||||||
FileUtils.ln_s($inventory, $vagrant_ansible)
|
FileUtils.ln_s($inventory, File.join($vagrant_ansible,"inventory"))
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@@ -56,12 +78,14 @@ Vagrant.configure("2") do |config|
|
|||||||
# always use Vagrants insecure key
|
# always use Vagrants insecure key
|
||||||
config.ssh.insert_key = false
|
config.ssh.insert_key = false
|
||||||
config.vm.box = $box
|
config.vm.box = $box
|
||||||
|
if SUPPORTED_OS[$os].has_key? :box_url
|
||||||
|
config.vm.box_url = SUPPORTED_OS[$os][:box_url]
|
||||||
|
end
|
||||||
|
config.ssh.username = SUPPORTED_OS[$os][:user]
|
||||||
# plugin conflict
|
# plugin conflict
|
||||||
if Vagrant.has_plugin?("vagrant-vbguest") then
|
if Vagrant.has_plugin?("vagrant-vbguest") then
|
||||||
config.vbguest.auto_update = false
|
config.vbguest.auto_update = false
|
||||||
end
|
end
|
||||||
|
|
||||||
(1..$num_instances).each do |i|
|
(1..$num_instances).each do |i|
|
||||||
config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
|
config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
|
||||||
config.vm.hostname = vm_name
|
config.vm.hostname = vm_name
|
||||||
@@ -87,25 +111,48 @@ Vagrant.configure("2") do |config|
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
config.vm.synced_folder ".", "/vagrant", type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
|
||||||
|
|
||||||
|
$shared_folders.each do |src, dst|
|
||||||
|
config.vm.synced_folder src, dst, type: "rsync", rsync__args: ['--verbose', '--archive', '--delete', '-z']
|
||||||
|
end
|
||||||
|
|
||||||
config.vm.provider :virtualbox do |vb|
|
config.vm.provider :virtualbox do |vb|
|
||||||
vb.gui = $vm_gui
|
vb.gui = $vm_gui
|
||||||
vb.memory = $vm_memory
|
vb.memory = $vm_memory
|
||||||
vb.cpus = $vm_cpus
|
vb.cpus = $vm_cpus
|
||||||
end
|
end
|
||||||
|
|
||||||
|
config.vm.provider :libvirt do |lv|
|
||||||
|
lv.memory = $vm_memory
|
||||||
|
end
|
||||||
|
|
||||||
ip = "#{$subnet}.#{i+100}"
|
ip = "#{$subnet}.#{i+100}"
|
||||||
host_vars[vm_name] = {
|
host_vars[vm_name] = {
|
||||||
"ip": ip,
|
"ip": ip,
|
||||||
"flannel_interface": ip,
|
"bootstrap_os": SUPPORTED_OS[$os][:bootstrap_os],
|
||||||
"flannel_backend_type": "host-gw",
|
|
||||||
"local_release_dir" => $local_release_dir,
|
"local_release_dir" => $local_release_dir,
|
||||||
"download_run_once": "False",
|
"download_run_once": "False",
|
||||||
# Override the default 'calico' with flannel.
|
"kube_network_plugin": $network_plugin
|
||||||
# inventory/group_vars/k8s-cluster.yml
|
|
||||||
"kube_network_plugin": "flannel",
|
|
||||||
}
|
}
|
||||||
|
|
||||||
config.vm.network :private_network, ip: ip
|
config.vm.network :private_network, ip: ip
|
||||||
|
|
||||||
|
# Disable swap for each vm
|
||||||
|
config.vm.provision "shell", inline: "swapoff -a"
|
||||||
|
|
||||||
|
if $kube_node_instances_with_disks
|
||||||
|
# Libvirt
|
||||||
|
driverletters = ('a'..'z').to_a
|
||||||
|
config.vm.provider :libvirt do |lv|
|
||||||
|
# always make /dev/sd{a/b/c} so that CI can ensure that
|
||||||
|
# virtualbox and libvirt will have the same devices to use for OSDs
|
||||||
|
(1..$kube_node_instances_with_disks_number).each do |d|
|
||||||
|
lv.storage :file, :device => "hd#{driverletters[d]}", :path => "disk-#{i}-#{d}-#{DISK_UUID}.disk", :size => $kube_node_instances_with_disks_size, :bus => "ide"
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
# Only execute once the Ansible provisioner,
|
# Only execute once the Ansible provisioner,
|
||||||
# when all the machines are up and ready.
|
# when all the machines are up and ready.
|
||||||
if i == $num_instances
|
if i == $num_instances
|
||||||
@@ -114,10 +161,10 @@ Vagrant.configure("2") do |config|
|
|||||||
if File.exist?(File.join(File.dirname($inventory), "hosts"))
|
if File.exist?(File.join(File.dirname($inventory), "hosts"))
|
||||||
ansible.inventory_path = $inventory
|
ansible.inventory_path = $inventory
|
||||||
end
|
end
|
||||||
ansible.sudo = true
|
ansible.become = true
|
||||||
ansible.limit = "all"
|
ansible.limit = "all"
|
||||||
ansible.host_key_checking = false
|
ansible.host_key_checking = false
|
||||||
ansible.raw_arguments = ["--forks=#{$num_instances}"]
|
ansible.raw_arguments = ["--forks=#{$num_instances}", "--flush-cache"]
|
||||||
ansible.host_vars = host_vars
|
ansible.host_vars = host_vars
|
||||||
#ansible.tags = ['download']
|
#ansible.tags = ['download']
|
||||||
ansible.groups = {
|
ansible.groups = {
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
[ssh_connection]
|
[ssh_connection]
|
||||||
pipelining=True
|
pipelining=True
|
||||||
#ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m
|
ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=100 -o UserKnownHostsFile=/dev/null
|
||||||
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
#control_path = ~/.ssh/ansible-%%r@%%h:%%p
|
||||||
[defaults]
|
[defaults]
|
||||||
host_key_checking=False
|
host_key_checking=False
|
||||||
@@ -10,3 +10,6 @@ fact_caching_connection = /tmp
|
|||||||
stdout_callback = skippy
|
stdout_callback = skippy
|
||||||
library = ./library
|
library = ./library
|
||||||
callback_whitelist = profile_tasks
|
callback_whitelist = profile_tasks
|
||||||
|
roles_path = roles:$VIRTUAL_ENV/usr/local/share/kubespray/roles:$VIRTUAL_ENV/usr/local/share/ansible/roles:/usr/share/kubespray/roles
|
||||||
|
deprecation_warnings=False
|
||||||
|
inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds
|
||||||
|
|||||||
66
cluster.yml
66
cluster.yml
@@ -2,7 +2,7 @@
|
|||||||
- hosts: localhost
|
- hosts: localhost
|
||||||
gather_facts: False
|
gather_facts: False
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
|
- { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
|
||||||
|
|
||||||
- hosts: k8s-cluster:etcd:calico-rr
|
- hosts: k8s-cluster:etcd:calico-rr
|
||||||
@@ -13,7 +13,7 @@
|
|||||||
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
|
# fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
|
||||||
ansible_ssh_pipelining: false
|
ansible_ssh_pipelining: false
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: bootstrap-os, tags: bootstrap-os}
|
- { role: bootstrap-os, tags: bootstrap-os}
|
||||||
|
|
||||||
- hosts: k8s-cluster:etcd:calico-rr
|
- hosts: k8s-cluster:etcd:calico-rr
|
||||||
@@ -21,72 +21,104 @@
|
|||||||
vars:
|
vars:
|
||||||
ansible_ssh_pipelining: true
|
ansible_ssh_pipelining: true
|
||||||
gather_facts: true
|
gather_facts: true
|
||||||
|
pre_tasks:
|
||||||
|
- name: gather facts from all instances
|
||||||
|
setup:
|
||||||
|
delegate_to: "{{item}}"
|
||||||
|
delegate_facts: True
|
||||||
|
with_items: "{{ groups['k8s-cluster'] + groups['etcd'] + groups['calico-rr']|default([]) }}"
|
||||||
|
|
||||||
- hosts: k8s-cluster:etcd:calico-rr
|
- hosts: k8s-cluster:etcd:calico-rr
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
|
|
||||||
- { role: kubernetes/preinstall, tags: preinstall }
|
- { role: kubernetes/preinstall, tags: preinstall }
|
||||||
- { role: docker, tags: docker }
|
- { role: docker, tags: docker }
|
||||||
- role: rkt
|
- role: rkt
|
||||||
tags: rkt
|
tags: rkt
|
||||||
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
|
when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
|
||||||
|
- { role: download, tags: download, skip_downloads: false }
|
||||||
|
environment: "{{proxy_env}}"
|
||||||
|
|
||||||
- hosts: etcd:k8s-cluster:vault
|
- hosts: etcd:k8s-cluster:vault:calico-rr
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults, when: "cert_management == 'vault'" }
|
- { role: kubespray-defaults, when: "cert_management == 'vault'" }
|
||||||
- { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
|
- { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
|
||||||
|
environment: "{{proxy_env}}"
|
||||||
|
|
||||||
- hosts: etcd
|
- hosts: etcd
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: etcd, tags: etcd, etcd_cluster_setup: true }
|
- { role: etcd, tags: etcd, etcd_cluster_setup: true }
|
||||||
|
|
||||||
- hosts: k8s-cluster
|
- hosts: k8s-cluster:calico-rr
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
|
- { role: etcd, tags: etcd, etcd_cluster_setup: false }
|
||||||
|
|
||||||
- hosts: etcd:k8s-cluster:vault
|
- hosts: etcd:k8s-cluster:vault:calico-rr
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: vault, tags: vault, when: "cert_management == 'vault'"}
|
- { role: vault, tags: vault, when: "cert_management == 'vault'"}
|
||||||
|
environment: "{{proxy_env}}"
|
||||||
|
|
||||||
- hosts: k8s-cluster
|
- hosts: k8s-cluster
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes/node, tags: node }
|
- { role: kubernetes/node, tags: node }
|
||||||
- { role: network_plugin, tags: network }
|
environment: "{{proxy_env}}"
|
||||||
|
|
||||||
- hosts: kube-master
|
- hosts: kube-master
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes/master, tags: master }
|
- { role: kubernetes/master, tags: master }
|
||||||
|
- { role: kubernetes/client, tags: client }
|
||||||
|
- { role: kubernetes-apps/cluster_roles, tags: cluster-roles }
|
||||||
|
|
||||||
|
- hosts: k8s-cluster
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
|
- { role: kubernetes/kubeadm, tags: kubeadm, when: "kubeadm_enabled" }
|
||||||
|
- { role: network_plugin, tags: network }
|
||||||
|
|
||||||
|
- hosts: kube-master[0]
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
|
- { role: kubernetes-apps/rotate_tokens, tags: rotate_tokens, when: "secret_changed|default(false)" }
|
||||||
|
|
||||||
|
- hosts: kube-master
|
||||||
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
|
roles:
|
||||||
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes-apps/network_plugin, tags: network }
|
- { role: kubernetes-apps/network_plugin, tags: network }
|
||||||
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
|
- { role: kubernetes-apps/policy_controller, tags: policy-controller }
|
||||||
|
- { role: kubernetes-apps/ingress_controller, tags: ingress-controller }
|
||||||
|
- { role: kubernetes-apps/external_provisioner, tags: external-provisioner }
|
||||||
|
|
||||||
- hosts: calico-rr
|
- hosts: calico-rr
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: network_plugin/calico/rr, tags: network }
|
- { role: network_plugin/calico/rr, tags: network }
|
||||||
|
|
||||||
- hosts: k8s-cluster
|
- hosts: k8s-cluster
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
|
- { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
|
||||||
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
|
- { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
|
||||||
|
environment: "{{proxy_env}}"
|
||||||
|
|
||||||
- hosts: kube-master[0]
|
- hosts: kube-master[0]
|
||||||
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
|
||||||
roles:
|
roles:
|
||||||
- { role: kargo-defaults}
|
- { role: kubespray-defaults}
|
||||||
- { role: kubernetes-apps, tags: apps }
|
- { role: kubernetes-apps, tags: apps }
|
||||||
|
|||||||
@@ -1,59 +1,3 @@
|
|||||||
## Kubernetes Community Code of Conduct
|
# Kubernetes Community Code of Conduct
|
||||||
|
|
||||||
### Contributor Code of Conduct
|
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
|
||||||
|
|
||||||
As contributors and maintainers of this project, and in the interest of fostering
|
|
||||||
an open and welcoming community, we pledge to respect all people who contribute
|
|
||||||
through reporting issues, posting feature requests, updating documentation,
|
|
||||||
submitting pull requests or patches, and other activities.
|
|
||||||
|
|
||||||
We are committed to making participation in this project a harassment-free experience for
|
|
||||||
everyone, regardless of level of experience, gender, gender identity and expression,
|
|
||||||
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
|
|
||||||
religion, or nationality.
|
|
||||||
|
|
||||||
Examples of unacceptable behavior by participants include:
|
|
||||||
|
|
||||||
* The use of sexualized language or imagery
|
|
||||||
* Personal attacks
|
|
||||||
* Trolling or insulting/derogatory comments
|
|
||||||
* Public or private harassment
|
|
||||||
* Publishing other's private information, such as physical or electronic addresses,
|
|
||||||
without explicit permission
|
|
||||||
* Other unethical or unprofessional conduct.
|
|
||||||
|
|
||||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
|
||||||
comments, commits, code, wiki edits, issues, and other contributions that are not
|
|
||||||
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
|
|
||||||
commit themselves to fairly and consistently applying these principles to every aspect
|
|
||||||
of managing this project. Project maintainers who do not follow or enforce the Code of
|
|
||||||
Conduct may be permanently removed from the project team.
|
|
||||||
|
|
||||||
This code of conduct applies both within project spaces and in public spaces
|
|
||||||
when an individual is representing the project or its community.
|
|
||||||
|
|
||||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by
|
|
||||||
opening an issue or contacting one or more of the project maintainers.
|
|
||||||
|
|
||||||
This Code of Conduct is adapted from the Contributor Covenant
|
|
||||||
(http://contributor-covenant.org), version 1.2.0, available at
|
|
||||||
http://contributor-covenant.org/version/1/2/0/
|
|
||||||
|
|
||||||
### Kubernetes Events Code of Conduct
|
|
||||||
|
|
||||||
Kubernetes events are working conferences intended for professional networking and collaboration in the
|
|
||||||
Kubernetes community. Attendees are expected to behave according to professional standards and in accordance
|
|
||||||
with their employer's policies on appropriate workplace behavior.
|
|
||||||
|
|
||||||
While at Kubernetes events or related social networking opportunities, attendees should not engage in
|
|
||||||
discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should
|
|
||||||
be especially aware of these concerns.
|
|
||||||
|
|
||||||
The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes
|
|
||||||
team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to
|
|
||||||
be engaging in discriminatory or offensive speech or actions.
|
|
||||||
|
|
||||||
Please bring any concerns to to the immediate attention of Kubernetes event staff
|
|
||||||
|
|
||||||
|
|
||||||
[]()
|
|
||||||
|
|||||||
61
contrib/aws_inventory/kubespray-aws-inventory.py
Executable file
61
contrib/aws_inventory/kubespray-aws-inventory.py
Executable file
@@ -0,0 +1,61 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
|
import boto3
|
||||||
|
import os
|
||||||
|
import argparse
|
||||||
|
import json
|
||||||
|
|
||||||
|
class SearchEC2Tags(object):
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.parse_args()
|
||||||
|
if self.args.list:
|
||||||
|
self.search_tags()
|
||||||
|
if self.args.host:
|
||||||
|
data = {}
|
||||||
|
print json.dumps(data, indent=2)
|
||||||
|
|
||||||
|
def parse_args(self):
|
||||||
|
|
||||||
|
##Check if VPC_VISIBILITY is set, if not default to private
|
||||||
|
if "VPC_VISIBILITY" in os.environ:
|
||||||
|
self.vpc_visibility = os.environ['VPC_VISIBILITY']
|
||||||
|
else:
|
||||||
|
self.vpc_visibility = "private"
|
||||||
|
|
||||||
|
##Support --list and --host flags. We largely ignore the host one.
|
||||||
|
parser = argparse.ArgumentParser()
|
||||||
|
parser.add_argument('--list', action='store_true', default=False, help='List instances')
|
||||||
|
parser.add_argument('--host', action='store_true', help='Get all the variables about a specific instance')
|
||||||
|
self.args = parser.parse_args()
|
||||||
|
|
||||||
|
def search_tags(self):
|
||||||
|
hosts = {}
|
||||||
|
hosts['_meta'] = { 'hostvars': {} }
|
||||||
|
|
||||||
|
##Search ec2 three times to find nodes of each group type. Relies on kubespray-role key/value.
|
||||||
|
for group in ["kube-master", "kube-node", "etcd"]:
|
||||||
|
hosts[group] = []
|
||||||
|
tag_key = "kubespray-role"
|
||||||
|
tag_value = ["*"+group+"*"]
|
||||||
|
region = os.environ['REGION']
|
||||||
|
|
||||||
|
ec2 = boto3.resource('ec2', region)
|
||||||
|
|
||||||
|
instances = ec2.instances.filter(Filters=[{'Name': 'tag:'+tag_key, 'Values': tag_value}, {'Name': 'instance-state-name', 'Values': ['running']}])
|
||||||
|
for instance in instances:
|
||||||
|
if self.vpc_visibility == "public":
|
||||||
|
hosts[group].append(instance.public_dns_name)
|
||||||
|
hosts['_meta']['hostvars'][instance.public_dns_name] = {
|
||||||
|
'ansible_ssh_host': instance.public_ip_address
|
||||||
|
}
|
||||||
|
else:
|
||||||
|
hosts[group].append(instance.private_dns_name)
|
||||||
|
hosts['_meta']['hostvars'][instance.private_dns_name] = {
|
||||||
|
'ansible_ssh_host': instance.private_ip_address
|
||||||
|
}
|
||||||
|
|
||||||
|
hosts['k8s-cluster'] = {'children':['kube-master', 'kube-node']}
|
||||||
|
print json.dumps(hosts, sort_keys=True, indent=2)
|
||||||
|
|
||||||
|
SearchEC2Tags()
|
||||||
@@ -5,7 +5,7 @@ Provision the base infrastructure for a Kubernetes cluster by using [Azure Resou
|
|||||||
## Status
|
## Status
|
||||||
|
|
||||||
This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
|
This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
|
||||||
Resource Group. It will not install Kubernetes itself, this has to be done in a later step by yourself (using kargo of course).
|
Resource Group. It will not install Kubernetes itself, this has to be done in a later step by yourself (using kubespray of course).
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
@@ -47,7 +47,7 @@ $ ./clear-rg.sh <resource_group_name>
|
|||||||
**WARNING** this really deletes everything from your resource group, including everything that was later created by you!
|
**WARNING** this really deletes everything from your resource group, including everything that was later created by you!
|
||||||
|
|
||||||
|
|
||||||
## Generating an inventory for kargo
|
## Generating an inventory for kubespray
|
||||||
|
|
||||||
After you have applied the templates, you can generate an inventory with this call:
|
After you have applied the templates, you can generate an inventory with this call:
|
||||||
|
|
||||||
@@ -55,10 +55,10 @@ After you have applied the templates, you can generate an inventory with this ca
|
|||||||
$ ./generate-inventory.sh <resource_group_name>
|
$ ./generate-inventory.sh <resource_group_name>
|
||||||
```
|
```
|
||||||
|
|
||||||
It will create the file ./inventory which can then be used with kargo, e.g.:
|
It will create the file ./inventory which can then be used with kubespray, e.g.:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
$ cd kargo-root-dir
|
$ cd kubespray-root-dir
|
||||||
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
|
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/sample/group_vars/all.yml" cluster.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|||||||
@@ -9,11 +9,18 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
ansible-playbook generate-templates.yml
|
if az &>/dev/null; then
|
||||||
|
echo "azure cli 2.0 found, using it instead of 1.0"
|
||||||
azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
|
./apply-rg_2.sh "$AZURE_RESOURCE_GROUP"
|
||||||
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
|
elif azure &>/dev/null; then
|
||||||
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
|
ansible-playbook generate-templates.yml
|
||||||
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
|
|
||||||
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
|
azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
|
||||||
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
|
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
else
|
||||||
|
echo "Azure cli not found"
|
||||||
|
fi
|
||||||
|
|||||||
19
contrib/azurerm/apply-rg_2.sh
Executable file
19
contrib/azurerm/apply-rg_2.sh
Executable file
@@ -0,0 +1,19 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
AZURE_RESOURCE_GROUP="$1"
|
||||||
|
|
||||||
|
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
||||||
|
echo "AZURE_RESOURCE_GROUP is missing"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
ansible-playbook generate-templates.yml
|
||||||
|
|
||||||
|
az group deployment create --template-file ./.generated/network.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
az group deployment create --template-file ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
az group deployment create --template-file ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
az group deployment create --template-file ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
az group deployment create --template-file ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
|
||||||
|
az group deployment create --template-file ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
|
||||||
@@ -9,6 +9,10 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
ansible-playbook generate-templates.yml
|
if az &>/dev/null; then
|
||||||
|
echo "azure cli 2.0 found, using it instead of 1.0"
|
||||||
azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
|
./clear-rg_2.sh "$AZURE_RESOURCE_GROUP"
|
||||||
|
else
|
||||||
|
ansible-playbook generate-templates.yml
|
||||||
|
azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
|
||||||
|
fi
|
||||||
|
|||||||
14
contrib/azurerm/clear-rg_2.sh
Executable file
14
contrib/azurerm/clear-rg_2.sh
Executable file
@@ -0,0 +1,14 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
AZURE_RESOURCE_GROUP="$1"
|
||||||
|
|
||||||
|
if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
||||||
|
echo "AZURE_RESOURCE_GROUP is missing"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
ansible-playbook generate-templates.yml
|
||||||
|
|
||||||
|
az group deployment create -g "$AZURE_RESOURCE_GROUP" --template-file ./.generated/clear-rg.json --mode Complete
|
||||||
@@ -8,5 +8,11 @@ if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
|
|||||||
echo "AZURE_RESOURCE_GROUP is missing"
|
echo "AZURE_RESOURCE_GROUP is missing"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
# check if azure cli 2.0 exists else use azure cli 1.0
|
||||||
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
|
if az &>/dev/null; then
|
||||||
|
ansible-playbook generate-inventory_2.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
|
||||||
|
elif azure &>/dev/null; then
|
||||||
|
ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
|
||||||
|
else
|
||||||
|
echo "Azure cli not found"
|
||||||
|
fi
|
||||||
|
|||||||
5
contrib/azurerm/generate-inventory_2.yml
Normal file
5
contrib/azurerm/generate-inventory_2.yml
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
---
|
||||||
|
- hosts: localhost
|
||||||
|
gather_facts: False
|
||||||
|
roles:
|
||||||
|
- generate-inventory_2
|
||||||
@@ -1,5 +1,6 @@
|
|||||||
|
|
||||||
# Due to some Azure limitations, this name must be globally unique
|
# Due to some Azure limitations (ex:- Storage Account's name must be unique),
|
||||||
|
# this name must be globally unique - it will be used as a prefix for azure components
|
||||||
cluster_name: example
|
cluster_name: example
|
||||||
|
|
||||||
# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion
|
# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion
|
||||||
@@ -17,10 +18,29 @@ minions_os_disk_size: 1000
|
|||||||
|
|
||||||
admin_username: devops
|
admin_username: devops
|
||||||
admin_password: changeme
|
admin_password: changeme
|
||||||
|
|
||||||
|
# MAKE SURE TO CHANGE THIS TO YOUR PUBLIC KEY to access your azure machines
|
||||||
ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
|
ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"
|
||||||
|
|
||||||
|
# Disable using ssh using password. Change it to false to allow to connect to ssh by password
|
||||||
|
disablePasswordAuthentication: true
|
||||||
|
|
||||||
# Azure CIDRs
|
# Azure CIDRs
|
||||||
azure_vnet_cidr: 10.0.0.0/8
|
azure_vnet_cidr: 10.0.0.0/8
|
||||||
azure_admin_cidr: 10.241.2.0/24
|
azure_admin_cidr: 10.241.2.0/24
|
||||||
azure_masters_cidr: 10.0.4.0/24
|
azure_masters_cidr: 10.0.4.0/24
|
||||||
azure_minions_cidr: 10.240.0.0/16
|
azure_minions_cidr: 10.240.0.0/16
|
||||||
|
|
||||||
|
# Azure loadbalancer port to use to access your cluster
|
||||||
|
kube_apiserver_port: 6443
|
||||||
|
|
||||||
|
# Azure Netwoking and storage naming to use with inventory/all.yml
|
||||||
|
#azure_virtual_network_name: KubeVNET
|
||||||
|
#azure_subnet_admin_name: ad-subnet
|
||||||
|
#azure_subnet_masters_name: master-subnet
|
||||||
|
#azure_subnet_minions_name: minion-subnet
|
||||||
|
#azure_route_table_name: routetable
|
||||||
|
#azure_security_group_name: secgroup
|
||||||
|
|
||||||
|
# Storage types available are: "Standard_LRS","Premium_LRS"
|
||||||
|
#azure_storage_account_type: Standard_LRS
|
||||||
|
|||||||
@@ -8,4 +8,4 @@
|
|||||||
vm_list: "{{ vm_list_cmd.stdout }}"
|
vm_list: "{{ vm_list_cmd.stdout }}"
|
||||||
|
|
||||||
- name: Generate inventory
|
- name: Generate inventory
|
||||||
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
|
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
|
||||||
|
|||||||
16
contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
Normal file
16
contrib/azurerm/roles/generate-inventory_2/tasks/main.yml
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
---
|
||||||
|
|
||||||
|
- name: Query Azure VMs IPs
|
||||||
|
command: az vm list-ip-addresses -o json --resource-group {{ azure_resource_group }}
|
||||||
|
register: vm_ip_list_cmd
|
||||||
|
|
||||||
|
- name: Query Azure VMs Roles
|
||||||
|
command: az vm list -o json --resource-group {{ azure_resource_group }}
|
||||||
|
register: vm_list_cmd
|
||||||
|
|
||||||
|
- set_fact:
|
||||||
|
vm_ip_list: "{{ vm_ip_list_cmd.stdout }}"
|
||||||
|
vm_roles_list: "{{ vm_list_cmd.stdout }}"
|
||||||
|
|
||||||
|
- name: Generate inventory
|
||||||
|
template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
|
||||||
@@ -0,0 +1,34 @@
|
|||||||
|
|
||||||
|
{% for vm in vm_ip_list %}
|
||||||
|
{% if not use_bastion or vm.virtualMachine.name == 'bastion' %}
|
||||||
|
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.publicIpAddresses[0].ipAddress }} ip={{ vm.virtualMachine.network.privateIpAddresses[0] }}
|
||||||
|
{% else %}
|
||||||
|
{{ vm.virtualMachine.name }} ansible_ssh_host={{ vm.virtualMachine.network.privateIpAddresses[0] }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[kube-master]
|
||||||
|
{% for vm in vm_roles_list %}
|
||||||
|
{% if 'kube-master' in vm.tags.roles %}
|
||||||
|
{{ vm.name }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[etcd]
|
||||||
|
{% for vm in vm_roles_list %}
|
||||||
|
{% if 'etcd' in vm.tags.roles %}
|
||||||
|
{{ vm.name }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[kube-node]
|
||||||
|
{% for vm in vm_roles_list %}
|
||||||
|
{% if 'kube-node' in vm.tags.roles %}
|
||||||
|
{{ vm.name }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[k8s-cluster:children]
|
||||||
|
kube-node
|
||||||
|
kube-master
|
||||||
|
|
||||||
@@ -1,15 +1,15 @@
|
|||||||
apiVersion: "2015-06-15"
|
apiVersion: "2015-06-15"
|
||||||
|
|
||||||
virtualNetworkName: "KubVNET"
|
virtualNetworkName: "{{ azure_virtual_network_name | default('KubeVNET') }}"
|
||||||
|
|
||||||
subnetAdminName: "ad-subnet"
|
subnetAdminName: "{{ azure_subnet_admin_name | default('ad-subnet') }}"
|
||||||
subnetMastersName: "master-subnet"
|
subnetMastersName: "{{ azure_subnet_masters_name | default('master-subnet') }}"
|
||||||
subnetMinionsName: "minion-subnet"
|
subnetMinionsName: "{{ azure_subnet_minions_name | default('minion-subnet') }}"
|
||||||
|
|
||||||
routeTableName: "routetable"
|
routeTableName: "{{ azure_route_table_name | default('routetable') }}"
|
||||||
securityGroupName: "secgroup"
|
securityGroupName: "{{ azure_security_group_name | default('secgroup') }}"
|
||||||
|
|
||||||
nameSuffix: "{{cluster_name}}"
|
nameSuffix: "{{ cluster_name }}"
|
||||||
|
|
||||||
availabilitySetMasters: "master-avs"
|
availabilitySetMasters: "master-avs"
|
||||||
availabilitySetMinions: "minion-avs"
|
availabilitySetMinions: "minion-avs"
|
||||||
@@ -33,5 +33,5 @@ imageReference:
|
|||||||
imageReferenceJson: "{{imageReference|to_json}}"
|
imageReferenceJson: "{{imageReference|to_json}}"
|
||||||
|
|
||||||
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
|
storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
|
||||||
storageAccountType: "Standard_LRS"
|
storageAccountType: "{{ azure_storage_account_type | default('Standard_LRS') }}"
|
||||||
|
|
||||||
|
|||||||
@@ -62,8 +62,8 @@
|
|||||||
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
|
"id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
|
||||||
},
|
},
|
||||||
"protocol": "tcp",
|
"protocol": "tcp",
|
||||||
"frontendPort": 443,
|
"frontendPort": "{{kube_apiserver_port}}",
|
||||||
"backendPort": 443,
|
"backendPort": "{{kube_apiserver_port}}",
|
||||||
"enableFloatingIP": false,
|
"enableFloatingIP": false,
|
||||||
"idleTimeoutInMinutes": 5,
|
"idleTimeoutInMinutes": 5,
|
||||||
"probe": {
|
"probe": {
|
||||||
@@ -77,7 +77,7 @@
|
|||||||
"name": "kube-api",
|
"name": "kube-api",
|
||||||
"properties": {
|
"properties": {
|
||||||
"protocol": "tcp",
|
"protocol": "tcp",
|
||||||
"port": 443,
|
"port": "{{kube_apiserver_port}}",
|
||||||
"intervalInSeconds": 5,
|
"intervalInSeconds": 5,
|
||||||
"numberOfProbes": 2
|
"numberOfProbes": 2
|
||||||
}
|
}
|
||||||
@@ -193,4 +193,4 @@
|
|||||||
} {% if not loop.last %},{% endif %}
|
} {% if not loop.last %},{% endif %}
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -92,7 +92,7 @@
|
|||||||
"description": "Allow secure kube-api",
|
"description": "Allow secure kube-api",
|
||||||
"protocol": "Tcp",
|
"protocol": "Tcp",
|
||||||
"sourcePortRange": "*",
|
"sourcePortRange": "*",
|
||||||
"destinationPortRange": "443",
|
"destinationPortRange": "{{kube_apiserver_port}}",
|
||||||
"sourceAddressPrefix": "Internet",
|
"sourceAddressPrefix": "Internet",
|
||||||
"destinationAddressPrefix": "*",
|
"destinationAddressPrefix": "*",
|
||||||
"access": "Allow",
|
"access": "Allow",
|
||||||
@@ -106,4 +106,4 @@
|
|||||||
"dependsOn": []
|
"dependsOn": []
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -41,7 +41,7 @@ import re
|
|||||||
import sys
|
import sys
|
||||||
|
|
||||||
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster:children',
|
ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster:children',
|
||||||
'calico-rr']
|
'calico-rr', 'vault']
|
||||||
PROTECTED_NAMES = ROLES
|
PROTECTED_NAMES = ROLES
|
||||||
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
|
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
|
||||||
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
|
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
|
||||||
@@ -54,7 +54,7 @@ def get_var_as_bool(name, default):
|
|||||||
|
|
||||||
# Configurable as shell vars start
|
# Configurable as shell vars start
|
||||||
|
|
||||||
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory.cfg")
|
CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory/sample/hosts.ini")
|
||||||
# Reconfigures cluster distribution at scale
|
# Reconfigures cluster distribution at scale
|
||||||
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
|
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
|
||||||
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
|
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 200))
|
||||||
@@ -65,7 +65,7 @@ HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")
|
|||||||
# Configurable as shell vars end
|
# Configurable as shell vars end
|
||||||
|
|
||||||
|
|
||||||
class KargoInventory(object):
|
class KubesprayInventory(object):
|
||||||
|
|
||||||
def __init__(self, changed_hosts=None, config_file=None):
|
def __init__(self, changed_hosts=None, config_file=None):
|
||||||
self.config = configparser.ConfigParser(allow_no_value=True,
|
self.config = configparser.ConfigParser(allow_no_value=True,
|
||||||
@@ -250,6 +250,7 @@ class KargoInventory(object):
|
|||||||
def set_etcd(self, hosts):
|
def set_etcd(self, hosts):
|
||||||
for host in hosts:
|
for host in hosts:
|
||||||
self.add_host_to_group('etcd', host)
|
self.add_host_to_group('etcd', host)
|
||||||
|
self.add_host_to_group('vault', host)
|
||||||
|
|
||||||
def load_file(self, files=None):
|
def load_file(self, files=None):
|
||||||
'''Directly loads JSON, or YAML file to inventory.'''
|
'''Directly loads JSON, or YAML file to inventory.'''
|
||||||
@@ -317,7 +318,7 @@ Delete a host by id: inventory.py -node1
|
|||||||
|
|
||||||
Configurable env vars:
|
Configurable env vars:
|
||||||
DEBUG Enable debug printing. Default: True
|
DEBUG Enable debug printing. Default: True
|
||||||
CONFIG_FILE File to write config to Default: ./inventory.cfg
|
CONFIG_FILE File to write config to Default: ./inventory/sample/hosts.ini
|
||||||
HOST_PREFIX Host prefix for generated hosts. Default: node
|
HOST_PREFIX Host prefix for generated hosts. Default: node
|
||||||
SCALE_THRESHOLD Separate ETCD role if # of nodes >= 50
|
SCALE_THRESHOLD Separate ETCD role if # of nodes >= 50
|
||||||
MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
|
MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
|
||||||
@@ -337,7 +338,7 @@ MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
|
|||||||
def main(argv=None):
|
def main(argv=None):
|
||||||
if not argv:
|
if not argv:
|
||||||
argv = sys.argv[1:]
|
argv = sys.argv[1:]
|
||||||
KargoInventory(argv, CONFIG_FILE)
|
KubesprayInventory(argv, CONFIG_FILE)
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
sys.exit(main())
|
sys.exit(main())
|
||||||
|
|||||||
@@ -1,3 +1,3 @@
|
|||||||
[metadata]
|
[metadata]
|
||||||
name = kargo-inventory-builder
|
name = kubespray-inventory-builder
|
||||||
version = 0.1
|
version = 0.1
|
||||||
|
|||||||
@@ -31,7 +31,7 @@ class TestInventory(unittest.TestCase):
|
|||||||
sys_mock.exit = mock.Mock()
|
sys_mock.exit = mock.Mock()
|
||||||
super(TestInventory, self).setUp()
|
super(TestInventory, self).setUp()
|
||||||
self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
|
self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
|
||||||
self.inv = inventory.KargoInventory()
|
self.inv = inventory.KubesprayInventory()
|
||||||
|
|
||||||
def test_get_ip_from_opts(self):
|
def test_get_ip_from_opts(self):
|
||||||
optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"
|
optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
# Kargo on KVM Virtual Machines hypervisor preparation
|
# Kubespray on KVM Virtual Machines hypervisor preparation
|
||||||
|
|
||||||
A simple playbook to ensure your system has the right settings to enable Kargo
|
A simple playbook to ensure your system has the right settings to enable Kubespray
|
||||||
deployment on VMs.
|
deployment on VMs.
|
||||||
|
|
||||||
This playbook does not create Virtual Machines, nor does it run Kargo itself.
|
This playbook does not create Virtual Machines, nor does it run Kubespray itself.
|
||||||
|
|
||||||
### User creation
|
### User creation
|
||||||
|
|
||||||
If you want to create a user for running Kargo deployment, you should specify
|
If you want to create a user for running Kubespray deployment, you should specify
|
||||||
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
|
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
|
||||||
|
|||||||
@@ -1,3 +1,3 @@
|
|||||||
#k8s_deployment_user: kargo
|
#k8s_deployment_user: kubespray
|
||||||
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa
|
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa
|
||||||
|
|
||||||
|
|||||||
@@ -12,9 +12,9 @@
|
|||||||
line: 'br_netfilter'
|
line: 'br_netfilter'
|
||||||
when: br_netfilter is defined and ansible_os_family == 'Debian'
|
when: br_netfilter is defined and ansible_os_family == 'Debian'
|
||||||
|
|
||||||
- name: Add br_netfilter into /etc/modules-load.d/kargo.conf
|
- name: Add br_netfilter into /etc/modules-load.d/kubespray.conf
|
||||||
copy:
|
copy:
|
||||||
dest: /etc/modules-load.d/kargo.conf
|
dest: /etc/modules-load.d/kubespray.conf
|
||||||
content: |-
|
content: |-
|
||||||
### This file is managed by Ansible
|
### This file is managed by Ansible
|
||||||
br-netfilter
|
br-netfilter
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
# Deploying a Kargo Kubernetes Cluster with GlusterFS
|
# Deploying a Kubespray Kubernetes Cluster with GlusterFS
|
||||||
|
|
||||||
You can either deploy using Ansible on its own by supplying your own inventory file or by using Terraform to create the VMs and then providing a dynamic inventory to Ansible. The following two sections are self-contained, you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built ansible inventory, you can neglect the **Using Terraform and Ansible** section.
|
You can either deploy using Ansible on its own by supplying your own inventory file or by using Terraform to create the VMs and then providing a dynamic inventory to Ansible. The following two sections are self-contained, you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built ansible inventory, you can neglect the **Using Terraform and Ansible** section.
|
||||||
|
|
||||||
@@ -6,16 +6,16 @@ You can either deploy using Ansible on its own by supplying your own inventory f
|
|||||||
|
|
||||||
In the same directory of this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, additionally to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and we add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
|
In the same directory of this ReadMe file you should find a file named `inventory.example` which contains an example setup. Please note that, additionally to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and we add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.
|
||||||
|
|
||||||
Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings on `inventory/group_vars/all.yml` make sense with your deployment. Then execute change to the kargo root folder, and execute (supposing that the machines are all using ubuntu):
|
Change that file to reflect your local setup (adding more machines or removing them and setting the adequate ip numbers), and save it to `inventory/sample/k8s_gfs_inventory`. Make sure that the settings on `inventory/sample/group_vars/all.yml` make sense with your deployment. Then execute change to the kubespray root folder, and execute (supposing that the machines are all using ubuntu):
|
||||||
|
|
||||||
```
|
```
|
||||||
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
|
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./cluster.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:
|
This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:
|
||||||
|
|
||||||
```
|
```
|
||||||
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
|
ansible-playbook -b --become-user=root -i inventory/sample/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
|
||||||
```
|
```
|
||||||
|
|
||||||
If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:
|
If your machines are not using Ubuntu, you need to change the `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines are using one OS and your GlusterFS a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:
|
||||||
@@ -28,7 +28,7 @@ k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_us
|
|||||||
|
|
||||||
## Using Terraform and Ansible
|
## Using Terraform and Ansible
|
||||||
|
|
||||||
First step is to fill in a `my-kargo-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:
|
First step is to fill in a `my-kubespray-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:
|
||||||
|
|
||||||
```
|
```
|
||||||
cluster_name = "cluster1"
|
cluster_name = "cluster1"
|
||||||
@@ -65,15 +65,15 @@ $ echo Setting up Terraform creds && \
|
|||||||
export TF_VAR_auth_url=${OS_AUTH_URL}
|
export TF_VAR_auth_url=${OS_AUTH_URL}
|
||||||
```
|
```
|
||||||
|
|
||||||
Then, standing on the kargo directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:
|
Then, standing on the kubespray directory (root base of the Git checkout), issue the following terraform command to create the VMs for the cluster:
|
||||||
|
|
||||||
```
|
```
|
||||||
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
|
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
|
||||||
```
|
```
|
||||||
|
|
||||||
This will create both your Kubernetes and Gluster VMs. Make sure that the ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any ansible variable that you want to setup (like, for instance, the type of machine for bootstrapping).
|
This will create both your Kubernetes and Gluster VMs. Make sure that the ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any ansible variable that you want to setup (like, for instance, the type of machine for bootstrapping).
|
||||||
|
|
||||||
Then, provision your Kubernetes (Kargo) cluster with the following ansible call:
|
Then, provision your Kubernetes (kubespray) cluster with the following ansible call:
|
||||||
|
|
||||||
```
|
```
|
||||||
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
|
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
|
||||||
@@ -88,5 +88,5 @@ ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./co
|
|||||||
If you need to destroy the cluster, you can run:
|
If you need to destroy the cluster, you can run:
|
||||||
|
|
||||||
```
|
```
|
||||||
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
|
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kubespray-gluster-cluster.tfvars contrib/terraform/openstack
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -1,8 +1,17 @@
|
|||||||
---
|
---
|
||||||
|
- hosts: gfs-cluster
|
||||||
|
gather_facts: false
|
||||||
|
vars:
|
||||||
|
ansible_ssh_pipelining: false
|
||||||
|
roles:
|
||||||
|
- { role: bootstrap-os, tags: bootstrap-os}
|
||||||
|
|
||||||
- hosts: all
|
- hosts: all
|
||||||
gather_facts: true
|
gather_facts: true
|
||||||
|
|
||||||
- hosts: gfs-cluster
|
- hosts: gfs-cluster
|
||||||
|
vars:
|
||||||
|
ansible_ssh_pipelining: true
|
||||||
roles:
|
roles:
|
||||||
- { role: glusterfs/server }
|
- { role: glusterfs/server }
|
||||||
|
|
||||||
@@ -12,6 +21,5 @@
|
|||||||
|
|
||||||
- hosts: kube-master[0]
|
- hosts: kube-master[0]
|
||||||
roles:
|
roles:
|
||||||
- { role: kubernetes-pv/lib }
|
|
||||||
- { role: kubernetes-pv }
|
- { role: kubernetes-pv }
|
||||||
|
|
||||||
|
|||||||
1
contrib/network-storage/glusterfs/roles/bootstrap-os
Symbolic link
1
contrib/network-storage/glusterfs/roles/bootstrap-os
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../../../../roles/bootstrap-os
|
||||||
@@ -4,6 +4,7 @@
|
|||||||
with_items:
|
with_items:
|
||||||
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
|
- { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
|
||||||
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
|
- { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
|
||||||
|
- { file: glusterfs-kubernetes-endpoint-svc.json.j2, type: svc, dest: glusterfs-kubernetes-endpoint-svc.json}
|
||||||
register: gluster_pv
|
register: gluster_pv
|
||||||
when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
|
when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,12 @@
|
|||||||
|
{
|
||||||
|
"kind": "Service",
|
||||||
|
"apiVersion": "v1",
|
||||||
|
"metadata": {
|
||||||
|
"name": "glusterfs"
|
||||||
|
},
|
||||||
|
"spec": {
|
||||||
|
"ports": [
|
||||||
|
{"port": 1}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
62
contrib/packaging/rpm/kubespray.spec
Normal file
62
contrib/packaging/rpm/kubespray.spec
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
%global srcname kubespray
|
||||||
|
|
||||||
|
%{!?upstream_version: %global upstream_version %{version}%{?milestone}}
|
||||||
|
|
||||||
|
Name: kubespray
|
||||||
|
Version: master
|
||||||
|
Release: %(git describe | sed -r 's/v(\S+-?)-(\S+)-(\S+)/\1.dev\2+\3/')
|
||||||
|
Summary: Ansible modules for installing Kubernetes
|
||||||
|
|
||||||
|
Group: System Environment/Libraries
|
||||||
|
License: ASL 2.0
|
||||||
|
Url: https://github.com/kubernetes-incubator/kubespray
|
||||||
|
Source0: https://github.com/kubernetes-incubator/kubespray/archive/%{upstream_version}.tar.gz#/%{name}-%{release}.tar.gz
|
||||||
|
|
||||||
|
BuildArch: noarch
|
||||||
|
BuildRequires: git
|
||||||
|
BuildRequires: python2
|
||||||
|
BuildRequires: python2-devel
|
||||||
|
BuildRequires: python2-setuptools
|
||||||
|
BuildRequires: python-d2to1
|
||||||
|
BuildRequires: python2-pbr
|
||||||
|
|
||||||
|
Requires: ansible >= 2.4.0
|
||||||
|
Requires: python-jinja2 >= 2.10
|
||||||
|
Requires: python-netaddr
|
||||||
|
Requires: python-pbr
|
||||||
|
|
||||||
|
%description
|
||||||
|
|
||||||
|
Ansible-kubespray is a set of Ansible modules and playbooks for
|
||||||
|
installing a Kubernetes cluster. If you have questions, join us
|
||||||
|
on the https://slack.k8s.io, channel '#kubespray'.
|
||||||
|
|
||||||
|
%prep
|
||||||
|
%autosetup -n %{name}-%{upstream_version} -S git
|
||||||
|
|
||||||
|
|
||||||
|
%build
|
||||||
|
export PBR_VERSION=%{release}
|
||||||
|
%{__python2} setup.py build bdist_rpm
|
||||||
|
|
||||||
|
|
||||||
|
%install
|
||||||
|
export PBR_VERSION=%{release}
|
||||||
|
export SKIP_PIP_INSTALL=1
|
||||||
|
%{__python2} setup.py install --skip-build --root %{buildroot} bdist_rpm
|
||||||
|
|
||||||
|
|
||||||
|
%files
|
||||||
|
%doc %{_docdir}/%{name}/README.md
|
||||||
|
%doc %{_docdir}/%{name}/inventory/sample/hosts.ini
|
||||||
|
%config %{_sysconfdir}/%{name}/ansible.cfg
|
||||||
|
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/all.yml
|
||||||
|
%config %{_sysconfdir}/%{name}/inventory/sample/group_vars/k8s-cluster.yml
|
||||||
|
%license %{_docdir}/%{name}/LICENSE
|
||||||
|
%{python2_sitelib}/%{srcname}-%{release}-py%{python2_version}.egg-info
|
||||||
|
%{_datarootdir}/%{name}/roles/
|
||||||
|
%{_datarootdir}/%{name}/playbooks/
|
||||||
|
%defattr(-,root,root)
|
||||||
|
|
||||||
|
|
||||||
|
%changelog
|
||||||
@@ -14,22 +14,112 @@ This project will create:
|
|||||||
|
|
||||||
**How to Use:**
|
**How to Use:**
|
||||||
|
|
||||||
- Export the variables for your AWS credentials or edit credentials.tfvars:
|
- Export the variables for your AWS credentials or edit `credentials.tfvars`:
|
||||||
|
|
||||||
```
|
```
|
||||||
export aws_access_key="xxx"
|
export AWS_ACCESS_KEY_ID="www"
|
||||||
export aws_secret_key="yyy"
|
export AWS_SECRET_ACCESS_KEY ="xxx"
|
||||||
export aws_ssh_key_name="zzz"
|
export AWS_SSH_KEY_NAME="yyy"
|
||||||
|
export AWS_DEFAULT_REGION="zzz"
|
||||||
|
```
|
||||||
|
- Rename `contrib/terraform/aws/terraform.tfvars.example` to `terraform.tfvars`
|
||||||
|
|
||||||
|
- Update `contrib/terraform/aws/terraform.tfvars` with your data. By default, the Terraform scripts use CoreOS as base image. If you want to change this behaviour, see note "Using other distrib than CoreOs" below.
|
||||||
|
- Allocate a new AWS Elastic IP. Use this for your `loadbalancer_apiserver_address` value (below)
|
||||||
|
- Create an AWS EC2 SSH Key
|
||||||
|
- Run with `terraform apply --var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```commandline
|
||||||
|
terraform apply -var-file=credentials.tfvars -var 'loadbalancer_apiserver_address=34.212.228.77'
|
||||||
```
|
```
|
||||||
|
|
||||||
- Update contrib/terraform/aws/terraform.tfvars with your data
|
- Terraform automatically creates an Ansible Inventory file called `hosts` with the created infrastructure in the directory `inventory`
|
||||||
|
|
||||||
- Run with `terraform apply -var-file="credentials.tfvars"` or `terraform apply` depending if you exported your AWS credentials
|
- Ansible will automatically generate an ssh config file for your bastion hosts. To connect to hosts with ssh using bastion host use generated ssh-bastion.conf.
|
||||||
|
Ansible automatically detects bastion and changes ssh_args
|
||||||
|
```commandline
|
||||||
|
ssh -F ./ssh-bastion.conf user@$ip
|
||||||
|
```
|
||||||
|
|
||||||
- Once the infrastructure is created, you can run the kargo playbooks and supply inventory/hosts with the `-i` flag.
|
- Once the infrastructure is created, you can run the kubespray playbooks and supply inventory/hosts with the `-i` flag.
|
||||||
|
|
||||||
|
Example (this one assumes you are using CoreOS)
|
||||||
|
```commandline
|
||||||
|
ansible-playbook -i ./inventory/hosts ./cluster.yml -e ansible_ssh_user=core -e bootstrap_os=coreos -b --become-user=root --flush-cache
|
||||||
|
```
|
||||||
|
***Using other distrib than CoreOs***
|
||||||
|
If you want to use another distribution than CoreOS, you can modify the search filters of the 'data "aws_ami" "distro"' in variables.tf.
|
||||||
|
|
||||||
|
For example, to use:
|
||||||
|
- Debian Jessie, replace 'data "aws_ami" "distro"' in variables.tf with
|
||||||
|
data "aws_ami" "distro" {
|
||||||
|
most_recent = true
|
||||||
|
|
||||||
|
filter {
|
||||||
|
name = "name"
|
||||||
|
values = ["debian-jessie-amd64-hvm-*"]
|
||||||
|
}
|
||||||
|
|
||||||
|
filter {
|
||||||
|
name = "virtualization-type"
|
||||||
|
values = ["hvm"]
|
||||||
|
}
|
||||||
|
|
||||||
|
owners = ["379101102735"]
|
||||||
|
}
|
||||||
|
|
||||||
|
- Ubuntu 16.04, replace 'data "aws_ami" "distro"' in variables.tf with
|
||||||
|
data "aws_ami" "distro" {
|
||||||
|
most_recent = true
|
||||||
|
|
||||||
|
filter {
|
||||||
|
name = "name"
|
||||||
|
values = ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-*"]
|
||||||
|
}
|
||||||
|
|
||||||
|
filter {
|
||||||
|
name = "virtualization-type"
|
||||||
|
values = ["hvm"]
|
||||||
|
}
|
||||||
|
|
||||||
|
owners = ["099720109477"]
|
||||||
|
}
|
||||||
|
|
||||||
|
- Centos 7, replace 'data "aws_ami" "distro"' in variables.tf with
|
||||||
|
data "aws_ami" "distro" {
|
||||||
|
most_recent = true
|
||||||
|
|
||||||
|
filter {
|
||||||
|
name = "name"
|
||||||
|
values = ["dcos-centos7-*"]
|
||||||
|
}
|
||||||
|
|
||||||
|
filter {
|
||||||
|
name = "virtualization-type"
|
||||||
|
values = ["hvm"]
|
||||||
|
}
|
||||||
|
|
||||||
|
owners = ["688023202711"]
|
||||||
|
}
|
||||||
|
|
||||||
|
**Troubleshooting**
|
||||||
|
|
||||||
|
***Remaining AWS IAM Instance Profile***:
|
||||||
|
|
||||||
|
If the cluster was destroyed without using Terraform it is possible that
|
||||||
|
the AWS IAM Instance Profiles still remain. To delete them you can use
|
||||||
|
the `AWS CLI` with the following command:
|
||||||
|
```
|
||||||
|
aws iam delete-instance-profile --region <region_name> --instance-profile-name <profile_name>
|
||||||
|
```
|
||||||
|
|
||||||
|
***Ansible Inventory doesnt get created:***
|
||||||
|
|
||||||
|
It could happen that Terraform doesnt create an Ansible Inventory file automatically. If this is the case copy the output after `inventory=` and create a file named `hosts`in the directory `inventory` and paste the inventory into the file.
|
||||||
|
|
||||||
**Architecture**
|
**Architecture**
|
||||||
|
|
||||||
Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones.
|
Pictured is an AWS Infrastructure created with this Terraform project distributed over two Availability Zones.
|
||||||
|
|
||||||

|

|
||||||
|
|||||||
@@ -8,6 +8,8 @@ provider "aws" {
|
|||||||
region = "${var.AWS_DEFAULT_REGION}"
|
region = "${var.AWS_DEFAULT_REGION}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
data "aws_availability_zones" "available" {}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Calling modules who create the initial AWS VPC / AWS ELB
|
* Calling modules who create the initial AWS VPC / AWS ELB
|
||||||
* and AWS IAM Roles for Kubernetes Deployment
|
* and AWS IAM Roles for Kubernetes Deployment
|
||||||
@@ -18,10 +20,10 @@ module "aws-vpc" {
|
|||||||
|
|
||||||
aws_cluster_name = "${var.aws_cluster_name}"
|
aws_cluster_name = "${var.aws_cluster_name}"
|
||||||
aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
|
aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
|
||||||
aws_avail_zones="${var.aws_avail_zones}"
|
aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
|
||||||
|
|
||||||
aws_cidr_subnets_private="${var.aws_cidr_subnets_private}"
|
aws_cidr_subnets_private="${var.aws_cidr_subnets_private}"
|
||||||
aws_cidr_subnets_public="${var.aws_cidr_subnets_public}"
|
aws_cidr_subnets_public="${var.aws_cidr_subnets_public}"
|
||||||
|
default_tags="${var.default_tags}"
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -31,10 +33,11 @@ module "aws-elb" {
|
|||||||
|
|
||||||
aws_cluster_name="${var.aws_cluster_name}"
|
aws_cluster_name="${var.aws_cluster_name}"
|
||||||
aws_vpc_id="${module.aws-vpc.aws_vpc_id}"
|
aws_vpc_id="${module.aws-vpc.aws_vpc_id}"
|
||||||
aws_avail_zones="${var.aws_avail_zones}"
|
aws_avail_zones="${slice(data.aws_availability_zones.available.names,0,2)}"
|
||||||
aws_subnet_ids_public="${module.aws-vpc.aws_subnet_ids_public}"
|
aws_subnet_ids_public="${module.aws-vpc.aws_subnet_ids_public}"
|
||||||
aws_elb_api_port = "${var.aws_elb_api_port}"
|
aws_elb_api_port = "${var.aws_elb_api_port}"
|
||||||
k8s_secure_api_port = "${var.k8s_secure_api_port}"
|
k8s_secure_api_port = "${var.k8s_secure_api_port}"
|
||||||
|
default_tags="${var.default_tags}"
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -48,12 +51,13 @@ module "aws-iam" {
|
|||||||
* Create Bastion Instances in AWS
|
* Create Bastion Instances in AWS
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
resource "aws_instance" "bastion-server" {
|
resource "aws_instance" "bastion-server" {
|
||||||
ami = "${var.aws_bastion_ami}"
|
ami = "${data.aws_ami.distro.id}"
|
||||||
instance_type = "${var.aws_bastion_size}"
|
instance_type = "${var.aws_bastion_size}"
|
||||||
count = "${length(var.aws_cidr_subnets_public)}"
|
count = "${length(var.aws_cidr_subnets_public)}"
|
||||||
associate_public_ip_address = true
|
associate_public_ip_address = true
|
||||||
availability_zone = "${element(var.aws_avail_zones,count.index)}"
|
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"
|
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"
|
||||||
|
|
||||||
|
|
||||||
@@ -61,11 +65,11 @@ resource "aws_instance" "bastion-server" {
|
|||||||
|
|
||||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}"
|
"Name", "kubernetes-${var.aws_cluster_name}-bastion-${count.index}",
|
||||||
Cluster = "${var.aws_cluster_name}"
|
"Cluster", "${var.aws_cluster_name}",
|
||||||
Role = "bastion-${var.aws_cluster_name}-${count.index}"
|
"Role", "bastion-${var.aws_cluster_name}-${count.index}"
|
||||||
}
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -75,13 +79,13 @@ resource "aws_instance" "bastion-server" {
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
resource "aws_instance" "k8s-master" {
|
resource "aws_instance" "k8s-master" {
|
||||||
ami = "${var.aws_cluster_ami}"
|
ami = "${data.aws_ami.distro.id}"
|
||||||
instance_type = "${var.aws_kube_master_size}"
|
instance_type = "${var.aws_kube_master_size}"
|
||||||
|
|
||||||
count = "${var.aws_kube_master_num}"
|
count = "${var.aws_kube_master_num}"
|
||||||
|
|
||||||
|
|
||||||
availability_zone = "${element(var.aws_avail_zones,count.index)}"
|
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
|
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
|
||||||
|
|
||||||
|
|
||||||
@@ -92,11 +96,11 @@ resource "aws_instance" "k8s-master" {
|
|||||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||||
|
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-master${count.index}"
|
"Name", "kubernetes-${var.aws_cluster_name}-master${count.index}",
|
||||||
Cluster = "${var.aws_cluster_name}"
|
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||||
Role = "master"
|
"Role", "master"
|
||||||
}
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_elb_attachment" "attach_master_nodes" {
|
resource "aws_elb_attachment" "attach_master_nodes" {
|
||||||
@@ -107,13 +111,13 @@ resource "aws_elb_attachment" "attach_master_nodes" {
|
|||||||
|
|
||||||
|
|
||||||
resource "aws_instance" "k8s-etcd" {
|
resource "aws_instance" "k8s-etcd" {
|
||||||
ami = "${var.aws_cluster_ami}"
|
ami = "${data.aws_ami.distro.id}"
|
||||||
instance_type = "${var.aws_etcd_size}"
|
instance_type = "${var.aws_etcd_size}"
|
||||||
|
|
||||||
count = "${var.aws_etcd_num}"
|
count = "${var.aws_etcd_num}"
|
||||||
|
|
||||||
|
|
||||||
availability_zone = "${element(var.aws_avail_zones,count.index)}"
|
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
|
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
|
||||||
|
|
||||||
|
|
||||||
@@ -121,23 +125,22 @@ resource "aws_instance" "k8s-etcd" {
|
|||||||
|
|
||||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||||
|
|
||||||
|
tags = "${merge(var.default_tags, map(
|
||||||
tags {
|
"Name", "kubernetes-${var.aws_cluster_name}-etcd${count.index}",
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}"
|
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||||
Cluster = "${var.aws_cluster_name}"
|
"Role", "etcd"
|
||||||
Role = "etcd"
|
))}"
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
resource "aws_instance" "k8s-worker" {
|
resource "aws_instance" "k8s-worker" {
|
||||||
ami = "${var.aws_cluster_ami}"
|
ami = "${data.aws_ami.distro.id}"
|
||||||
instance_type = "${var.aws_kube_worker_size}"
|
instance_type = "${var.aws_kube_worker_size}"
|
||||||
|
|
||||||
count = "${var.aws_kube_worker_num}"
|
count = "${var.aws_kube_worker_num}"
|
||||||
|
|
||||||
availability_zone = "${element(var.aws_avail_zones,count.index)}"
|
availability_zone = "${element(slice(data.aws_availability_zones.available.names,0,2),count.index)}"
|
||||||
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
|
subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"
|
||||||
|
|
||||||
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
|
vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]
|
||||||
@@ -146,35 +149,34 @@ resource "aws_instance" "k8s-worker" {
|
|||||||
key_name = "${var.AWS_SSH_KEY_NAME}"
|
key_name = "${var.AWS_SSH_KEY_NAME}"
|
||||||
|
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}"
|
"Name", "kubernetes-${var.aws_cluster_name}-worker${count.index}",
|
||||||
Cluster = "${var.aws_cluster_name}"
|
"kubernetes.io/cluster/${var.aws_cluster_name}", "member",
|
||||||
Role = "worker"
|
"Role", "worker"
|
||||||
}
|
))}"
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Create Kargo Inventory File
|
* Create Kubespray Inventory File
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
data "template_file" "inventory" {
|
data "template_file" "inventory" {
|
||||||
template = "${file("${path.module}/templates/inventory.tpl")}"
|
template = "${file("${path.module}/templates/inventory.tpl")}"
|
||||||
|
|
||||||
vars {
|
vars {
|
||||||
public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_ssh_host=%s" , aws_instance.bastion-server.*.public_ip))}"
|
public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_host=%s" , aws_instance.bastion-server.*.public_ip))}"
|
||||||
connection_strings_master = "${join("\n",formatlist("%s ansible_ssh_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
|
connection_strings_master = "${join("\n",formatlist("%s ansible_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
|
||||||
connection_strings_node = "${join("\n", formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
|
connection_strings_node = "${join("\n", formatlist("%s ansible_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
|
||||||
connection_strings_etcd = "${join("\n",formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
|
connection_strings_etcd = "${join("\n",formatlist("%s ansible_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
|
||||||
list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
|
list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
|
||||||
list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
|
list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
|
||||||
list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
|
list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
|
||||||
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
|
elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
|
||||||
elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}"
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "null_resource" "inventories" {
|
resource "null_resource" "inventories" {
|
||||||
@@ -182,4 +184,8 @@ resource "null_resource" "inventories" {
|
|||||||
command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
|
command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
triggers {
|
||||||
|
template = "${data.template_file.inventory.rendered}"
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
|
Before Width: | Height: | Size: 114 KiB After Width: | Height: | Size: 114 KiB |
@@ -2,9 +2,9 @@ resource "aws_security_group" "aws-elb" {
|
|||||||
name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
||||||
vpc_id = "${var.aws_vpc_id}"
|
vpc_id = "${var.aws_vpc_id}"
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
|
||||||
}
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -43,7 +43,7 @@ resource "aws_elb" "aws-elb-api" {
|
|||||||
healthy_threshold = 2
|
healthy_threshold = 2
|
||||||
unhealthy_threshold = 2
|
unhealthy_threshold = 2
|
||||||
timeout = 3
|
timeout = 3
|
||||||
target = "HTTP:8080/"
|
target = "TCP:${var.k8s_secure_api_port}"
|
||||||
interval = 30
|
interval = 30
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -52,7 +52,7 @@ resource "aws_elb" "aws-elb-api" {
|
|||||||
connection_draining = true
|
connection_draining = true
|
||||||
connection_draining_timeout = 400
|
connection_draining_timeout = 400
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-elb-api"
|
"Name", "kubernetes-${var.aws_cluster_name}-elb-api"
|
||||||
}
|
))}"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -26,3 +26,8 @@ variable "aws_subnet_ids_public" {
|
|||||||
description = "IDs of Public Subnets"
|
description = "IDs of Public Subnets"
|
||||||
type = "list"
|
type = "list"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "default_tags" {
|
||||||
|
description = "Tags for all resources"
|
||||||
|
type = "map"
|
||||||
|
}
|
||||||
|
|||||||
@@ -129,10 +129,10 @@ EOF
|
|||||||
|
|
||||||
resource "aws_iam_instance_profile" "kube-master" {
|
resource "aws_iam_instance_profile" "kube-master" {
|
||||||
name = "kube_${var.aws_cluster_name}_master_profile"
|
name = "kube_${var.aws_cluster_name}_master_profile"
|
||||||
roles = ["${aws_iam_role.kube-master.name}"]
|
role = "${aws_iam_role.kube-master.name}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_iam_instance_profile" "kube-worker" {
|
resource "aws_iam_instance_profile" "kube-worker" {
|
||||||
name = "kube_${var.aws_cluster_name}_node_profile"
|
name = "kube_${var.aws_cluster_name}_node_profile"
|
||||||
roles = ["${aws_iam_role.kube-worker.name}"]
|
role = "${aws_iam_role.kube-worker.name}"
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,9 +6,9 @@ resource "aws_vpc" "cluster-vpc" {
|
|||||||
enable_dns_support = true
|
enable_dns_support = true
|
||||||
enable_dns_hostnames = true
|
enable_dns_hostnames = true
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-vpc"
|
"Name", "kubernetes-${var.aws_cluster_name}-vpc"
|
||||||
}
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -18,13 +18,13 @@ resource "aws_eip" "cluster-nat-eip" {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
resource "aws_internet_gateway" "cluster-vpc-internetgw" {
|
resource "aws_internet_gateway" "cluster-vpc-internetgw" {
|
||||||
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
||||||
|
|
||||||
tags {
|
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-internetgw"
|
tags = "${merge(var.default_tags, map(
|
||||||
}
|
"Name", "kubernetes-${var.aws_cluster_name}-internetgw"
|
||||||
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_subnet" "cluster-vpc-subnets-public" {
|
resource "aws_subnet" "cluster-vpc-subnets-public" {
|
||||||
@@ -33,9 +33,10 @@ resource "aws_subnet" "cluster-vpc-subnets-public" {
|
|||||||
availability_zone = "${element(var.aws_avail_zones, count.index)}"
|
availability_zone = "${element(var.aws_avail_zones, count.index)}"
|
||||||
cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"
|
cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
|
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public",
|
||||||
}
|
"kubernetes.io/cluster/${var.aws_cluster_name}", "member"
|
||||||
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_nat_gateway" "cluster-nat-gateway" {
|
resource "aws_nat_gateway" "cluster-nat-gateway" {
|
||||||
@@ -51,9 +52,9 @@ resource "aws_subnet" "cluster-vpc-subnets-private" {
|
|||||||
availability_zone = "${element(var.aws_avail_zones, count.index)}"
|
availability_zone = "${element(var.aws_avail_zones, count.index)}"
|
||||||
cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"
|
cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
|
"Name", "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
|
||||||
}
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
#Routing in VPC
|
#Routing in VPC
|
||||||
@@ -66,9 +67,10 @@ resource "aws_route_table" "kubernetes-public" {
|
|||||||
cidr_block = "0.0.0.0/0"
|
cidr_block = "0.0.0.0/0"
|
||||||
gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
|
gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
|
||||||
}
|
}
|
||||||
tags {
|
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-routetable-public"
|
tags = "${merge(var.default_tags, map(
|
||||||
}
|
"Name", "kubernetes-${var.aws_cluster_name}-routetable-public"
|
||||||
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_route_table" "kubernetes-private" {
|
resource "aws_route_table" "kubernetes-private" {
|
||||||
@@ -78,9 +80,11 @@ resource "aws_route_table" "kubernetes-private" {
|
|||||||
cidr_block = "0.0.0.0/0"
|
cidr_block = "0.0.0.0/0"
|
||||||
nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
|
nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
|
||||||
}
|
}
|
||||||
tags {
|
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
|
tags = "${merge(var.default_tags, map(
|
||||||
}
|
"Name", "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
|
||||||
|
))}"
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_route_table_association" "kubernetes-public" {
|
resource "aws_route_table_association" "kubernetes-public" {
|
||||||
@@ -104,9 +108,9 @@ resource "aws_security_group" "kubernetes" {
|
|||||||
name = "kubernetes-${var.aws_cluster_name}-securitygroup"
|
name = "kubernetes-${var.aws_cluster_name}-securitygroup"
|
||||||
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
vpc_id = "${aws_vpc.cluster-vpc.id}"
|
||||||
|
|
||||||
tags {
|
tags = "${merge(var.default_tags, map(
|
||||||
Name = "kubernetes-${var.aws_cluster_name}-securitygroup"
|
"Name", "kubernetes-${var.aws_cluster_name}-securitygroup"
|
||||||
}
|
))}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "aws_security_group_rule" "allow-all-ingress" {
|
resource "aws_security_group_rule" "allow-all-ingress" {
|
||||||
|
|||||||
@@ -14,3 +14,8 @@ output "aws_security_group" {
|
|||||||
value = ["${aws_security_group.kubernetes.*.id}"]
|
value = ["${aws_security_group.kubernetes.*.id}"]
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
output "default_tags" {
|
||||||
|
value = "${var.default_tags}"
|
||||||
|
|
||||||
|
}
|
||||||
|
|||||||
@@ -22,3 +22,8 @@ variable "aws_cidr_subnets_public" {
|
|||||||
description = "CIDR Blocks for public subnets in Availability zones"
|
description = "CIDR Blocks for public subnets in Availability zones"
|
||||||
type = "list"
|
type = "list"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "default_tags" {
|
||||||
|
description = "Default tags for all resources"
|
||||||
|
type = "map"
|
||||||
|
}
|
||||||
|
|||||||
@@ -18,3 +18,11 @@ output "etcd" {
|
|||||||
output "aws_elb_api_fqdn" {
|
output "aws_elb_api_fqdn" {
|
||||||
value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
|
value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
output "inventory" {
|
||||||
|
value = "${data.template_file.inventory.rendered}"
|
||||||
|
}
|
||||||
|
|
||||||
|
output "default_tags" {
|
||||||
|
value = "${var.default_tags}"
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
[all]
|
||||||
${connection_strings_master}
|
${connection_strings_master}
|
||||||
${connection_strings_node}
|
${connection_strings_node}
|
||||||
${connection_strings_etcd}
|
${connection_strings_etcd}
|
||||||
@@ -23,5 +24,4 @@ kube-master
|
|||||||
|
|
||||||
|
|
||||||
[k8s-cluster:vars]
|
[k8s-cluster:vars]
|
||||||
${elb_api_fqdn}
|
${elb_api_fqdn}
|
||||||
${elb_api_port}
|
|
||||||
@@ -5,11 +5,9 @@ aws_cluster_name = "devtest"
|
|||||||
aws_vpc_cidr_block = "10.250.192.0/18"
|
aws_vpc_cidr_block = "10.250.192.0/18"
|
||||||
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
|
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
|
||||||
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
|
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
|
||||||
aws_avail_zones = ["eu-central-1a","eu-central-1b"]
|
|
||||||
|
|
||||||
#Bastion Host
|
#Bastion Host
|
||||||
aws_bastion_ami = "ami-5900cc36"
|
aws_bastion_size = "t2.medium"
|
||||||
aws_bastion_size = "t2.small"
|
|
||||||
|
|
||||||
|
|
||||||
#Kubernetes Cluster
|
#Kubernetes Cluster
|
||||||
@@ -23,9 +21,13 @@ aws_etcd_size = "t2.medium"
|
|||||||
aws_kube_worker_num = 4
|
aws_kube_worker_num = 4
|
||||||
aws_kube_worker_size = "t2.medium"
|
aws_kube_worker_size = "t2.medium"
|
||||||
|
|
||||||
aws_cluster_ami = "ami-903df7ff"
|
|
||||||
|
|
||||||
#Settings AWS ELB
|
#Settings AWS ELB
|
||||||
|
|
||||||
aws_elb_api_port = 443
|
aws_elb_api_port = 6443
|
||||||
k8s_secure_api_port = 443
|
k8s_secure_api_port = 6443
|
||||||
|
kube_insecure_apiserver_address = "0.0.0.0"
|
||||||
|
|
||||||
|
default_tags = {
|
||||||
|
# Env = "devtest"
|
||||||
|
# Product = "kubernetes"
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
#Global Vars
|
#Global Vars
|
||||||
aws_cluster_name = "devtest"
|
aws_cluster_name = "devtest"
|
||||||
aws_region = "eu-central-1"
|
|
||||||
|
|
||||||
#VPC Vars
|
#VPC Vars
|
||||||
aws_vpc_cidr_block = "10.250.192.0/18"
|
aws_vpc_cidr_block = "10.250.192.0/18"
|
||||||
@@ -28,5 +27,6 @@ aws_cluster_ami = "ami-903df7ff"
|
|||||||
|
|
||||||
#Settings AWS ELB
|
#Settings AWS ELB
|
||||||
|
|
||||||
aws_elb_api_port = 443
|
aws_elb_api_port = 6443
|
||||||
k8s_secure_api_port = 443
|
k8s_secure_api_port = 6443
|
||||||
|
kube_insecure_apiserver_address = 0.0.0.0
|
||||||
|
|||||||
@@ -20,6 +20,21 @@ variable "aws_cluster_name" {
|
|||||||
description = "Name of AWS Cluster"
|
description = "Name of AWS Cluster"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
data "aws_ami" "distro" {
|
||||||
|
most_recent = true
|
||||||
|
|
||||||
|
filter {
|
||||||
|
name = "name"
|
||||||
|
values = ["CoreOS-stable-*"]
|
||||||
|
}
|
||||||
|
|
||||||
|
filter {
|
||||||
|
name = "virtualization-type"
|
||||||
|
values = ["hvm"]
|
||||||
|
}
|
||||||
|
|
||||||
|
owners = ["595879546273"] #CoreOS
|
||||||
|
}
|
||||||
|
|
||||||
//AWS VPC Variables
|
//AWS VPC Variables
|
||||||
|
|
||||||
@@ -27,11 +42,6 @@ variable "aws_vpc_cidr_block" {
|
|||||||
description = "CIDR Block for VPC"
|
description = "CIDR Block for VPC"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "aws_avail_zones" {
|
|
||||||
description = "Availability Zones Used"
|
|
||||||
type = "list"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "aws_cidr_subnets_private" {
|
variable "aws_cidr_subnets_private" {
|
||||||
description = "CIDR Blocks for private subnets in Availability Zones"
|
description = "CIDR Blocks for private subnets in Availability Zones"
|
||||||
type = "list"
|
type = "list"
|
||||||
@@ -44,10 +54,6 @@ variable "aws_cidr_subnets_public" {
|
|||||||
|
|
||||||
//AWS EC2 Settings
|
//AWS EC2 Settings
|
||||||
|
|
||||||
variable "aws_bastion_ami" {
|
|
||||||
description = "AMI ID for Bastion Host in chosen AWS Region"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "aws_bastion_size" {
|
variable "aws_bastion_size" {
|
||||||
description = "EC2 Instance Size of Bastion Host"
|
description = "EC2 Instance Size of Bastion Host"
|
||||||
}
|
}
|
||||||
@@ -81,9 +87,6 @@ variable "aws_kube_worker_size" {
|
|||||||
description = "Instance size of Kubernetes Worker Nodes"
|
description = "Instance size of Kubernetes Worker Nodes"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "aws_cluster_ami" {
|
|
||||||
description = "AMI ID for Kubernetes Cluster"
|
|
||||||
}
|
|
||||||
/*
|
/*
|
||||||
* AWS ELB Settings
|
* AWS ELB Settings
|
||||||
*
|
*
|
||||||
@@ -95,3 +98,8 @@ variable "aws_elb_api_port" {
|
|||||||
variable "k8s_secure_api_port" {
|
variable "k8s_secure_api_port" {
|
||||||
description = "Secure Port of K8S API Server"
|
description = "Secure Port of K8S API Server"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "default_tags" {
|
||||||
|
description = "Default tags for all resources"
|
||||||
|
type = "map"
|
||||||
|
}
|
||||||
|
|||||||
4
contrib/terraform/openstack/.gitignore
vendored
Normal file
4
contrib/terraform/openstack/.gitignore
vendored
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
.terraform
|
||||||
|
*.tfvars
|
||||||
|
*.tfstate
|
||||||
|
*.tfstate.backup
|
||||||
@@ -5,53 +5,287 @@ Openstack.
|
|||||||
|
|
||||||
## Status
|
## Status
|
||||||
|
|
||||||
This will install a Kubernetes cluster on an Openstack Cloud. It has been tested on a
|
This will install a Kubernetes cluster on an Openstack Cloud. It should work on
|
||||||
OpenStack Cloud provided by [BlueBox](https://www.blueboxcloud.com/) and on OpenStack at [EMBL-EBI's](http://www.ebi.ac.uk/) [EMBASSY Cloud](http://www.embassycloud.org/). This should work on most modern installs of OpenStack that support the basic
|
most modern installs of OpenStack that support the basic services.
|
||||||
services.
|
|
||||||
|
|
||||||
There are some assumptions made to try and ensure it will work on your openstack cluster.
|
## Approach
|
||||||
|
The terraform configuration inspects variables found in
|
||||||
|
[variables.tf](variables.tf) to create resources in your OpenStack cluster.
|
||||||
|
There is a [python script](../terraform.py) that reads the generated`.tfstate`
|
||||||
|
file to generate a dynamic inventory that is consumed by the main ansible script
|
||||||
|
to actually install kubernetes and stand up the cluster.
|
||||||
|
|
||||||
* floating-ips are used for access, but you can have masters and nodes that don't use floating-ips if needed. You need currently at least 1 floating ip, which we would suggest is used on a master.
|
### Networking
|
||||||
* you already have a suitable OS image in glance
|
The configuration includes creating a private subnet with a router to the
|
||||||
* you already have both an internal network and a floating-ip pool created
|
external net. It will allocate floating IPs from a pool and assign them to the
|
||||||
* you have security-groups enabled
|
hosts where that makes sense. You have the option of creating bastion hosts
|
||||||
|
inside the private subnet to access the nodes there. Alternatively, a node with
|
||||||
|
a floating IP can be used as a jump host to nodes without.
|
||||||
|
|
||||||
|
### Kubernetes Nodes
|
||||||
|
You can create many different kubernetes topologies by setting the number of
|
||||||
|
different classes of hosts. For each class there are options for allocating
|
||||||
|
floating IP addresses or not.
|
||||||
|
- Master nodes with etcd
|
||||||
|
- Master nodes without etcd
|
||||||
|
- Standalone etcd hosts
|
||||||
|
- Kubernetes worker nodes
|
||||||
|
|
||||||
|
Note that the Ansible script will report an invalid configuration if you wind up
|
||||||
|
with an even number of etcd instances since that is not a valid configuration.
|
||||||
|
|
||||||
|
### GlusterFS
|
||||||
|
The Terraform configuration supports provisioning of an optional GlusterFS
|
||||||
|
shared file system based on a separate set of VMs. To enable this, you need to
|
||||||
|
specify:
|
||||||
|
- the number of Gluster hosts (minimum 2)
|
||||||
|
- Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks
|
||||||
|
- Other properties related to provisioning the hosts
|
||||||
|
|
||||||
|
Even if you are using Container Linux by CoreOS for your cluster, you will still
|
||||||
|
need the GlusterFS VMs to be based on either Debian or RedHat based images.
|
||||||
|
Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through
|
||||||
|
binaries available on hyperkube v1.4.3_coreos.0 or higher.
|
||||||
|
|
||||||
## Requirements
|
## Requirements
|
||||||
|
|
||||||
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
|
- [Install Terraform](https://www.terraform.io/intro/getting-started/install.html)
|
||||||
|
- [Install Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html)
|
||||||
|
- you already have a suitable OS image in Glance
|
||||||
|
- you already have a floating IP pool created
|
||||||
|
- you have security groups enabled
|
||||||
|
- you have a pair of keys generated that can be used to secure the new hosts
|
||||||
|
|
||||||
|
## Module Architecture
|
||||||
|
The configuration is divided into three modules:
|
||||||
|
- Network
|
||||||
|
- IPs
|
||||||
|
- Compute
|
||||||
|
|
||||||
|
The main reason for splitting the configuration up in this way is to easily
|
||||||
|
accommodate situations where floating IPs are limited by a quota or if you have
|
||||||
|
any external references to the floating IP (e.g. DNS) that would otherwise have
|
||||||
|
to be updated.
|
||||||
|
|
||||||
|
You can force your existing IPs by modifying the compute variables in
|
||||||
|
`kubespray.tf` as follows:
|
||||||
|
|
||||||
|
```
|
||||||
|
k8s_master_fips = ["151.101.129.67"]
|
||||||
|
k8s_node_fips = ["151.101.129.68"]
|
||||||
|
```
|
||||||
|
|
||||||
## Terraform
|
## Terraform
|
||||||
|
Terraform will be used to provision all of the OpenStack resources with base software as appropriate.
|
||||||
|
|
||||||
Terraform will be used to provision all of the OpenStack resources. It is also used to deploy and provision the software
|
### Configuration
|
||||||
requirements.
|
|
||||||
|
|
||||||
### Prep
|
#### Inventory files
|
||||||
|
|
||||||
#### OpenStack
|
Create an inventory directory for your cluster by copying the existing sample and linking the `hosts` script (used to build the inventory based on Terraform state):
|
||||||
|
|
||||||
Ensure your OpenStack **Identity v2** credentials are loaded in environment variables. This can be done by downloading a credentials .rc file from your OpenStack dashboard and sourcing it:
|
```ShellSession
|
||||||
|
$ cp -LRp contrib/terraform/openstack/sample-inventory inventory/$CLUSTER
|
||||||
```
|
$ cd inventory/$CLUSTER
|
||||||
$ source ~/.stackrc
|
$ ln -s ../../contrib/terraform/openstack/hosts
|
||||||
```
|
```
|
||||||
|
|
||||||
You will need two networks before installing, an internal network and
|
This will be the base for subsequent Terraform commands.
|
||||||
an external (floating IP Pool) network. The internet network can be shared as
|
|
||||||
we use security groups to provide network segregation. Due to the many
|
|
||||||
differences between OpenStack installs the Terraform does not attempt to create
|
|
||||||
these for you.
|
|
||||||
|
|
||||||
By default Terraform will expect that your networks are called `internal` and
|
#### OpenStack access and credentials
|
||||||
`external`. You can change this by altering the Terraform variables `network_name` and `floatingip_pool`. This can be done on a new variables file or through environment variables.
|
|
||||||
|
|
||||||
A full list of variables you can change can be found at [variables.tf](variables.tf).
|
No provider variables are hardcoded inside `variables.tf` because Terraform
|
||||||
|
supports various authentication methods for OpenStack: the older script and
|
||||||
|
environment method (using `openrc`) as well as a newer declarative method, and
|
||||||
|
different OpenStack environments may support Identity API version 2 or 3.
|
||||||
|
|
||||||
All OpenStack resources will use the Terraform variable `cluster_name` (
|
These are examples and may vary depending on your OpenStack cloud provider,
|
||||||
default `example`) in their name to make it easier to track. For example the
|
for an exhaustive list on how to authenticate on OpenStack with Terraform
|
||||||
first compute resource will be named `example-kubernetes-1`.
|
please read the [OpenStack provider documentation](https://www.terraform.io/docs/providers/openstack/).
|
||||||
|
|
||||||
#### Terraform
|
##### Declarative method (recommended)
|
||||||
|
|
||||||
|
The recommended authentication method is to describe credentials in a YAML file `clouds.yaml` that can be stored in:
|
||||||
|
|
||||||
|
* the current directory
|
||||||
|
* `~/.config/openstack`
|
||||||
|
* `/etc/openstack`
|
||||||
|
|
||||||
|
`clouds.yaml`:
|
||||||
|
|
||||||
|
```
|
||||||
|
clouds:
|
||||||
|
mycloud:
|
||||||
|
auth:
|
||||||
|
auth_url: https://openstack:5000/v3
|
||||||
|
username: "username"
|
||||||
|
project_name: "projectname"
|
||||||
|
project_id: projectid
|
||||||
|
user_domain_name: "Default"
|
||||||
|
password: "password"
|
||||||
|
region_name: "RegionOne"
|
||||||
|
interface: "public"
|
||||||
|
identity_api_version: 3
|
||||||
|
```
|
||||||
|
|
||||||
|
If you have multiple clouds defined in your `clouds.yaml` file you can choose
|
||||||
|
the one you want to use with the environment variable `OS_CLOUD`:
|
||||||
|
|
||||||
|
```
|
||||||
|
export OS_CLOUD=mycloud
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Openrc method (deprecated)
|
||||||
|
|
||||||
|
When using classic environment variables, Terraform uses default `OS_*`
|
||||||
|
environment variables. A script suitable for your environment may be available
|
||||||
|
from Horizon under *Project* -> *Compute* -> *Access & Security* -> *API Access*.
|
||||||
|
|
||||||
|
With identity v2:
|
||||||
|
|
||||||
|
```
|
||||||
|
source openrc
|
||||||
|
|
||||||
|
env | grep OS
|
||||||
|
|
||||||
|
OS_AUTH_URL=https://openstack:5000/v2.0
|
||||||
|
OS_PROJECT_ID=projectid
|
||||||
|
OS_PROJECT_NAME=projectname
|
||||||
|
OS_USERNAME=username
|
||||||
|
OS_PASSWORD=password
|
||||||
|
OS_REGION_NAME=RegionOne
|
||||||
|
OS_INTERFACE=public
|
||||||
|
OS_IDENTITY_API_VERSION=2
|
||||||
|
```
|
||||||
|
|
||||||
|
With identity v3:
|
||||||
|
|
||||||
|
```
|
||||||
|
source openrc
|
||||||
|
|
||||||
|
env | grep OS
|
||||||
|
|
||||||
|
OS_AUTH_URL=https://openstack:5000/v3
|
||||||
|
OS_PROJECT_ID=projectid
|
||||||
|
OS_PROJECT_NAME=username
|
||||||
|
OS_PROJECT_DOMAIN_ID=default
|
||||||
|
OS_USERNAME=username
|
||||||
|
OS_PASSWORD=password
|
||||||
|
OS_REGION_NAME=RegionOne
|
||||||
|
OS_INTERFACE=public
|
||||||
|
OS_IDENTITY_API_VERSION=3
|
||||||
|
OS_USER_DOMAIN_NAME=Default
|
||||||
|
```
|
||||||
|
|
||||||
|
Terraform does not support a mix of DomainName and DomainID, choose one or the
|
||||||
|
other:
|
||||||
|
|
||||||
|
```
|
||||||
|
* provider.openstack: You must provide exactly one of DomainID or DomainName to authenticate by Username
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
unset OS_USER_DOMAIN_NAME
|
||||||
|
export OS_USER_DOMAIN_ID=default
|
||||||
|
|
||||||
|
or
|
||||||
|
|
||||||
|
unset OS_PROJECT_DOMAIN_ID
|
||||||
|
set OS_PROJECT_DOMAIN_NAME=Default
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Cluster variables
|
||||||
|
The construction of the cluster is driven by values found in
|
||||||
|
[variables.tf](variables.tf).
|
||||||
|
|
||||||
|
For your cluster, edit `inventory/$CLUSTER/cluster.tf`.
|
||||||
|
|
||||||
|
|Variable | Description |
|
||||||
|
|---------|-------------|
|
||||||
|
|`cluster_name` | All OpenStack resources will use the Terraform variable`cluster_name` (default`example`) in their name to make it easier to track. For example the first compute resource will be named`example-kubernetes-1`. |
|
||||||
|
|`network_name` | The name to be given to the internal network that will be generated |
|
||||||
|
|`dns_nameservers`| An array of DNS name server names to be used by hosts in the internal subnet. |
|
||||||
|
|`floatingip_pool` | Name of the pool from which floating IPs will be allocated |
|
||||||
|
|`external_net` | UUID of the external network that will be routed to |
|
||||||
|
|`flavor_k8s_master`,`flavor_k8s_node`,`flavor_etcd`, `flavor_bastion`,`flavor_gfs_node` | Flavor depends on your openstack installation, you can get available flavor IDs through`nova flavor-list` |
|
||||||
|
|`image`,`image_gfs` | Name of the image to use in provisioning the compute resources. Should already be loaded into glance. |
|
||||||
|
|`ssh_user`,`ssh_user_gfs` | The username to ssh into the image with. This usually depends on the image you have selected |
|
||||||
|
|`public_key_path` | Path on your local workstation to the public key file you wish to use in creating the key pairs |
|
||||||
|
|`number_of_k8s_masters`, `number_of_k8s_masters_no_floating_ip` | Number of nodes that serve as both master and etcd. These can be provisioned with or without floating IP addresses|
|
||||||
|
|`number_of_k8s_masters_no_etcd`, `number_of_k8s_masters_no_floating_ip_no_etcd` | Number of nodes that serve as just master with no etcd. These can be provisioned with or without floating IP addresses |
|
||||||
|
|`number_of_etcd` | Number of pure etcd nodes |
|
||||||
|
|`number_of_k8s_nodes`, `number_of_k8s_nodes_no_floating_ip` | Kubernetes worker nodes. These can be provisioned with or without floating ip addresses. |
|
||||||
|
|`number_of_bastions` | Number of bastion hosts to create. Scripts assume this is really just zero or one |
|
||||||
|
|`number_of_gfs_nodes_no_floating_ip` | Number of gluster servers to provision. |
|
||||||
|
| `gfs_volume_size_in_gb` | Size of the non-ephemeral volumes to be attached to store the GlusterFS bricks |
|
||||||
|
|
||||||
|
#### Terraform state files
|
||||||
|
|
||||||
|
In the cluster's inventory folder, the following files might be created (either by Terraform
|
||||||
|
or manually), to prevent you from pushing them accidentally they are in a
|
||||||
|
`.gitignore` file in the `terraform/openstack` directory :
|
||||||
|
|
||||||
|
* `.terraform`
|
||||||
|
* `.tfvars`
|
||||||
|
* `.tfstate`
|
||||||
|
* `.tfstate.backup`
|
||||||
|
|
||||||
|
You can still add them manually if you want to.
|
||||||
|
|
||||||
|
### Initialization
|
||||||
|
|
||||||
|
Before Terraform can operate on your cluster you need to install the required
|
||||||
|
plugins. This is accomplished as follows:
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
$ cd inventory/$CLUSTER
|
||||||
|
$ terraform init ../../contrib/terraform/openstack
|
||||||
|
```
|
||||||
|
|
||||||
|
This should finish fairly quickly telling you Terraform has successfully initialized and loaded necessary modules.
|
||||||
|
|
||||||
|
### Provisioning cluster
|
||||||
|
You can apply the Terraform configuration to your cluster with the following command
|
||||||
|
issued from your cluster's inventory directory (`inventory/$CLUSTER`):
|
||||||
|
```ShellSession
|
||||||
|
$ terraform apply -var-file=cluster.tf ../../contrib/terraform/openstack
|
||||||
|
```
|
||||||
|
|
||||||
|
if you chose to create a bastion host, this script will create
|
||||||
|
`contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for Ansible to
|
||||||
|
be able to access your machines tunneling through the bastion's IP address. If
|
||||||
|
you want to manually handle the ssh tunneling to these machines, please delete
|
||||||
|
or move that file. If you want to use this, just leave it there, as ansible will
|
||||||
|
pick it up automatically.
|
||||||
|
|
||||||
|
### Destroying cluster
|
||||||
|
You can destroy your new cluster with the following command issued from the cluster's inventory directory:
|
||||||
|
|
||||||
|
```ShellSession
|
||||||
|
$ terraform destroy -var-file=cluster.tf ../../contrib/terraform/openstack
|
||||||
|
```
|
||||||
|
|
||||||
|
If you've started the Ansible run, it may also be a good idea to do some manual cleanup:
|
||||||
|
|
||||||
|
* remove SSH keys from the destroyed cluster from your `~/.ssh/known_hosts` file
|
||||||
|
* clean up any temporary cache files: `rm /tmp/$CLUSTER-*`
|
||||||
|
|
||||||
|
### Debugging
|
||||||
|
You can enable debugging output from Terraform by setting
|
||||||
|
`OS_DEBUG` to 1 and`TF_LOG` to`DEBUG` before running the Terraform command.
|
||||||
|
|
||||||
|
### Terraform output
|
||||||
|
|
||||||
|
Terraform can output values that are useful for configure Neutron/Octavia LBaaS or Cinder persistent volume provisioning as part of your Kubernetes deployment:
|
||||||
|
|
||||||
|
- `private_subnet_id`: the subnet where your instances are running is used for `openstack_lbaas_subnet_id`
|
||||||
|
- `floating_network_id`: the network_id where the floating IP are provisioned is used for `openstack_lbaas_floating_network_id`
|
||||||
|
|
||||||
|
## Ansible
|
||||||
|
|
||||||
|
### Node access
|
||||||
|
|
||||||
|
#### SSH
|
||||||
|
|
||||||
Ensure your local ssh-agent is running and your ssh key has been added. This
|
Ensure your local ssh-agent is running and your ssh key has been added. This
|
||||||
step is required by the terraform provisioner:
|
step is required by the terraform provisioner:
|
||||||
@@ -61,77 +295,22 @@ $ eval $(ssh-agent -s)
|
|||||||
$ ssh-add ~/.ssh/id_rsa
|
$ ssh-add ~/.ssh/id_rsa
|
||||||
```
|
```
|
||||||
|
|
||||||
|
If you have deployed and destroyed a previous iteration of your cluster, you will need to clear out any stale keys from your SSH "known hosts" file ( `~/.ssh/known_hosts`).
|
||||||
|
|
||||||
Ensure that you have your Openstack credentials loaded into Terraform
|
#### Bastion host
|
||||||
environment variables. Likely via a command similar to:
|
|
||||||
|
If you are not using a bastion host, but not all of your nodes have floating IPs, create a file `inventory/$CLUSTER/group_vars/no-floating.yml` with the following content. Use one of your nodes with a floating IP (this should have been output at the end of the Terraform step) and the appropriate user for that OS, or if you have another jump host, use that.
|
||||||
|
|
||||||
```
|
```
|
||||||
$ echo Setting up Terraform creds && \
|
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@MASTER_IP"'
|
||||||
export TF_VAR_username=${OS_USERNAME} && \
|
|
||||||
export TF_VAR_password=${OS_PASSWORD} && \
|
|
||||||
export TF_VAR_tenant=${OS_TENANT_NAME} && \
|
|
||||||
export TF_VAR_auth_url=${OS_AUTH_URL}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
If you want to provision master or node VMs that don't use floating ips, write on a `my-terraform-vars.tfvars` file, for example:
|
#### Test access
|
||||||
|
|
||||||
|
Make sure you can connect to the hosts. Note that Container Linux by CoreOS will have a state `FAILED` due to Python not being present. This is okay, because Python will be installed during bootstrapping, so long as the hosts are not `UNREACHABLE`.
|
||||||
|
|
||||||
```
|
```
|
||||||
number_of_k8s_masters = "1"
|
$ ansible -i inventory/$CLUSTER/hosts -m ping all
|
||||||
number_of_k8s_masters_no_floating_ip = "2"
|
|
||||||
number_of_k8s_nodes_no_floating_ip = "1"
|
|
||||||
number_of_k8s_nodes = "0"
|
|
||||||
```
|
|
||||||
This will provision one VM as master using a floating ip, two additional masters using no floating ips (these will only have private ips inside your tenancy) and one VM as node, again without a floating ip.
|
|
||||||
|
|
||||||
Additionally, now the terraform based installation supports provisioning of a GlusterFS shared file system based on a separate set of VMs, running either a Debian or RedHat based set of VMs. To enable this, you need to add to your `my-terraform-vars.tfvars` the following variables:
|
|
||||||
|
|
||||||
```
|
|
||||||
# Flavour depends on your openstack installation, you can get available flavours through `nova list-flavors`
|
|
||||||
flavor_gfs_node = "af659280-5b8a-42b5-8865-a703775911da"
|
|
||||||
# This is the name of an image already available in your openstack installation.
|
|
||||||
image_gfs = "Ubuntu 15.10"
|
|
||||||
number_of_gfs_nodes_no_floating_ip = "3"
|
|
||||||
# This is the size of the non-ephemeral volumes to be attached to store the GlusterFS bricks.
|
|
||||||
gfs_volume_size_in_gb = "50"
|
|
||||||
# The user needed for the image choosen for GlusterFS.
|
|
||||||
ssh_user_gfs = "ubuntu"
|
|
||||||
```
|
|
||||||
|
|
||||||
If these variables are provided, this will give rise to a new ansible group called `gfs-cluster`, for which we have added ansible roles to execute in the ansible provisioning step. If you are using Container Linux by CoreOS, these GlusterFS VM necessarily need to be either Debian or RedHat based VMs, Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through binaries available on hyperkube v1.4.3_coreos.0 or higher.
|
|
||||||
|
|
||||||
|
|
||||||
# Provision a Kubernetes Cluster on OpenStack
|
|
||||||
|
|
||||||
If not using a tfvars file for your setup, then execute:
|
|
||||||
```
|
|
||||||
terraform apply -state=contrib/terraform/openstack/terraform.tfstate contrib/terraform/openstack
|
|
||||||
openstack_compute_secgroup_v2.k8s_master: Creating...
|
|
||||||
description: "" => "example - Kubernetes Master"
|
|
||||||
name: "" => "example-k8s-master"
|
|
||||||
rule.#: "" => "<computed>"
|
|
||||||
...
|
|
||||||
...
|
|
||||||
Apply complete! Resources: 9 added, 0 changed, 0 destroyed.
|
|
||||||
|
|
||||||
The state of your infrastructure has been saved to the path
|
|
||||||
below. This state is required to modify and destroy your
|
|
||||||
infrastructure, so keep it safe. To inspect the complete state
|
|
||||||
use the `terraform show` command.
|
|
||||||
|
|
||||||
State path: contrib/terraform/openstack/terraform.tfstate
|
|
||||||
```
|
|
||||||
|
|
||||||
Alternatively, if you wrote your terraform variables on a file `my-terraform-vars.tfvars`, your command would look like:
|
|
||||||
```
|
|
||||||
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
|
|
||||||
```
|
|
||||||
|
|
||||||
if you choose to add masters or nodes without floating ips (only internal ips on your OpenStack tenancy), this script will create as well a file `contrib/terraform/openstack/k8s-cluster.yml` with an ssh command for ansible to be able to access your machines tunneling through the first floating ip used. If you want to manually handling the ssh tunneling to these machines, please delete or move that file. If you want to use this, just leave it there, as ansible will pick it up automatically.
|
|
||||||
|
|
||||||
Make sure you can connect to the hosts:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ ansible -i contrib/terraform/openstack/hosts -m ping all
|
|
||||||
example-k8s_node-1 | SUCCESS => {
|
example-k8s_node-1 | SUCCESS => {
|
||||||
"changed": false,
|
"changed": false,
|
||||||
"ping": "pong"
|
"ping": "pong"
|
||||||
@@ -146,26 +325,113 @@ example-k8s-master-1 | SUCCESS => {
|
|||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
if you are deploying a system that needs bootstrapping, like Container Linux by CoreOS, these might have a state `FAILED` due to Container Linux by CoreOS not having python. As long as the state is not `UNREACHABLE`, this is fine.
|
If it fails try to connect manually via SSH. It could be something as simple as a stale host key.
|
||||||
|
|
||||||
if it fails try to connect manually via SSH ... it could be somthing as simple as a stale host key.
|
### Configure cluster variables
|
||||||
|
|
||||||
Deploy kubernetes:
|
|
||||||
|
|
||||||
|
Edit `inventory/$CLUSTER/group_vars/all.yml`:
|
||||||
|
- Set variable **bootstrap_os** appropriately for your desired image:
|
||||||
```
|
```
|
||||||
$ ansible-playbook --become -i contrib/terraform/openstack/hosts cluster.yml
|
# Valid bootstrap options (required): ubuntu, coreos, centos, none
|
||||||
|
bootstrap_os: coreos
|
||||||
|
```
|
||||||
|
- **bin_dir**:
|
||||||
|
```
|
||||||
|
# Directory where the binaries will be installed
|
||||||
|
# Default:
|
||||||
|
# bin_dir: /usr/local/bin
|
||||||
|
# For Container Linux by CoreOS:
|
||||||
|
bin_dir: /opt/bin
|
||||||
|
```
|
||||||
|
- and **cloud_provider**:
|
||||||
|
```
|
||||||
|
cloud_provider: openstack
|
||||||
|
```
|
||||||
|
Edit `inventory/$CLUSTER/group_vars/k8s-cluster.yml`:
|
||||||
|
- Set variable **kube_network_plugin** to your desired networking plugin.
|
||||||
|
- **flannel** works out-of-the-box
|
||||||
|
- **calico** requires [configuring OpenStack Neutron ports](/docs/openstack.md) to allow service and pod subnets
|
||||||
|
```
|
||||||
|
# Choose network plugin (calico, weave or flannel)
|
||||||
|
# Can also be set to 'cloud', which lets the cloud provider setup appropriate routing
|
||||||
|
kube_network_plugin: flannel
|
||||||
|
```
|
||||||
|
- Set variable **resolvconf_mode**
|
||||||
|
```
|
||||||
|
# Can be docker_dns, host_resolvconf or none
|
||||||
|
# Default:
|
||||||
|
# resolvconf_mode: docker_dns
|
||||||
|
# For Container Linux by CoreOS:
|
||||||
|
resolvconf_mode: host_resolvconf
|
||||||
```
|
```
|
||||||
|
|
||||||
# clean up:
|
### Deploy Kubernetes
|
||||||
|
|
||||||
```
|
```
|
||||||
$ terraform destroy
|
$ ansible-playbook --become -i inventory/$CLUSTER/hosts cluster.yml
|
||||||
Do you really want to destroy?
|
|
||||||
Terraform will delete all your managed infrastructure.
|
|
||||||
There is no undo. Only 'yes' will be accepted to confirm.
|
|
||||||
|
|
||||||
Enter a value: yes
|
|
||||||
...
|
|
||||||
...
|
|
||||||
Apply complete! Resources: 0 added, 0 changed, 12 destroyed.
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
This will take some time as there are many tasks to run.
|
||||||
|
|
||||||
|
## Kubernetes
|
||||||
|
|
||||||
|
### Set up kubectl
|
||||||
|
1. [Install kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your workstation
|
||||||
|
2. Add a route to the internal IP of a master node (if needed):
|
||||||
|
```
|
||||||
|
sudo route add [master-internal-ip] gw [router-ip]
|
||||||
|
```
|
||||||
|
or
|
||||||
|
```
|
||||||
|
sudo route add -net [internal-subnet]/24 gw [router-ip]
|
||||||
|
```
|
||||||
|
3. List Kubernetes certificates & keys:
|
||||||
|
```
|
||||||
|
ssh [os-user]@[master-ip] sudo ls /etc/kubernetes/ssl/
|
||||||
|
```
|
||||||
|
4. Get `admin`'s certificates and keys:
|
||||||
|
```
|
||||||
|
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1-key.pem > admin-key.pem
|
||||||
|
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/admin-[cluster_name]-k8s-master-1.pem > admin.pem
|
||||||
|
ssh [os-user]@[master-ip] sudo cat /etc/kubernetes/ssl/ca.pem > ca.pem
|
||||||
|
```
|
||||||
|
5. Configure kubectl:
|
||||||
|
```ShellSession
|
||||||
|
$ kubectl config set-cluster default-cluster --server=https://[master-internal-ip]:6443 \
|
||||||
|
--certificate-authority=ca.pem
|
||||||
|
|
||||||
|
$ kubectl config set-credentials default-admin \
|
||||||
|
--certificate-authority=ca.pem \
|
||||||
|
--client-key=admin-key.pem \
|
||||||
|
--client-certificate=admin.pem
|
||||||
|
|
||||||
|
$ kubectl config set-context default-system --cluster=default-cluster --user=default-admin
|
||||||
|
$ kubectl config use-context default-system
|
||||||
|
```
|
||||||
|
7. Check it:
|
||||||
|
```
|
||||||
|
kubectl version
|
||||||
|
```
|
||||||
|
|
||||||
|
If you are using floating ip addresses then you may get this error:
|
||||||
|
```
|
||||||
|
Unable to connect to the server: x509: certificate is valid for 10.0.0.6, 10.0.0.6, 10.233.0.1, 127.0.0.1, not 132.249.238.25
|
||||||
|
```
|
||||||
|
|
||||||
|
You can tell kubectl to ignore this condition by adding the
|
||||||
|
`--insecure-skip-tls-verify` option.
|
||||||
|
|
||||||
|
## GlusterFS
|
||||||
|
GlusterFS is not deployed by the standard`cluster.yml` playbook, see the
|
||||||
|
[GlusterFS playbook documentation](../../network-storage/glusterfs/README.md)
|
||||||
|
for instructions.
|
||||||
|
|
||||||
|
Basically you will install Gluster as
|
||||||
|
```ShellSession
|
||||||
|
$ ansible-playbook --become -i inventory/$CLUSTER/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## What's next
|
||||||
|
|
||||||
|
Try out your new Kubernetes cluster with the [Hello Kubernetes service](https://kubernetes.io/docs/tasks/access-application-cluster/service-access-application-cluster/).
|
||||||
|
|||||||
@@ -1 +1 @@
|
|||||||
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"'
|
ansible_ssh_common_args: "-o ProxyCommand='ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -W %h:%p -q USER@BASTION_ADDRESS {% if ansible_ssh_private_key_file is defined %}-i {{ ansible_ssh_private_key_file }}{% endif %}'"
|
||||||
|
|||||||
@@ -1,167 +1,77 @@
|
|||||||
resource "openstack_networking_floatingip_v2" "k8s_master" {
|
module "network" {
|
||||||
count = "${var.number_of_k8s_masters}"
|
source = "modules/network"
|
||||||
pool = "${var.floatingip_pool}"
|
|
||||||
|
external_net = "${var.external_net}"
|
||||||
|
network_name = "${var.network_name}"
|
||||||
|
cluster_name = "${var.cluster_name}"
|
||||||
|
dns_nameservers = "${var.dns_nameservers}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_networking_floatingip_v2" "k8s_node" {
|
module "ips" {
|
||||||
count = "${var.number_of_k8s_nodes}"
|
source = "modules/ips"
|
||||||
pool = "${var.floatingip_pool}"
|
|
||||||
|
number_of_k8s_masters = "${var.number_of_k8s_masters}"
|
||||||
|
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
|
||||||
|
number_of_k8s_nodes = "${var.number_of_k8s_nodes}"
|
||||||
|
floatingip_pool = "${var.floatingip_pool}"
|
||||||
|
number_of_bastions = "${var.number_of_bastions}"
|
||||||
|
external_net = "${var.external_net}"
|
||||||
|
network_name = "${var.network_name}"
|
||||||
|
router_id = "${module.network.router_id}"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
module "compute" {
|
||||||
|
source = "modules/compute"
|
||||||
|
|
||||||
resource "openstack_compute_keypair_v2" "k8s" {
|
cluster_name = "${var.cluster_name}"
|
||||||
name = "kubernetes-${var.cluster_name}"
|
number_of_k8s_masters = "${var.number_of_k8s_masters}"
|
||||||
public_key = "${file(var.public_key_path)}"
|
number_of_k8s_masters_no_etcd = "${var.number_of_k8s_masters_no_etcd}"
|
||||||
|
number_of_etcd = "${var.number_of_etcd}"
|
||||||
|
number_of_k8s_masters_no_floating_ip = "${var.number_of_k8s_masters_no_floating_ip}"
|
||||||
|
number_of_k8s_masters_no_floating_ip_no_etcd = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
|
||||||
|
number_of_k8s_nodes = "${var.number_of_k8s_nodes}"
|
||||||
|
number_of_bastions = "${var.number_of_bastions}"
|
||||||
|
number_of_k8s_nodes_no_floating_ip = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||||
|
number_of_gfs_nodes_no_floating_ip = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||||
|
gfs_volume_size_in_gb = "${var.gfs_volume_size_in_gb}"
|
||||||
|
public_key_path = "${var.public_key_path}"
|
||||||
|
image = "${var.image}"
|
||||||
|
image_gfs = "${var.image_gfs}"
|
||||||
|
ssh_user = "${var.ssh_user}"
|
||||||
|
ssh_user_gfs = "${var.ssh_user_gfs}"
|
||||||
|
flavor_k8s_master = "${var.flavor_k8s_master}"
|
||||||
|
flavor_k8s_node = "${var.flavor_k8s_node}"
|
||||||
|
flavor_etcd = "${var.flavor_etcd}"
|
||||||
|
flavor_gfs_node = "${var.flavor_gfs_node}"
|
||||||
|
network_name = "${var.network_name}"
|
||||||
|
flavor_bastion = "${var.flavor_bastion}"
|
||||||
|
k8s_master_fips = "${module.ips.k8s_master_fips}"
|
||||||
|
k8s_node_fips = "${module.ips.k8s_node_fips}"
|
||||||
|
bastion_fips = "${module.ips.bastion_fips}"
|
||||||
|
|
||||||
|
network_id = "${module.network.router_id}"
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_secgroup_v2" "k8s_master" {
|
output "private_subnet_id" {
|
||||||
name = "${var.cluster_name}-k8s-master"
|
value = "${module.network.subnet_id}"
|
||||||
description = "${var.cluster_name} - Kubernetes Master"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_secgroup_v2" "k8s" {
|
output "floating_network_id" {
|
||||||
name = "${var.cluster_name}-k8s"
|
value = "${var.external_net}"
|
||||||
description = "${var.cluster_name} - Kubernetes"
|
|
||||||
rule {
|
|
||||||
ip_protocol = "tcp"
|
|
||||||
from_port = "22"
|
|
||||||
to_port = "22"
|
|
||||||
cidr = "0.0.0.0/0"
|
|
||||||
}
|
|
||||||
rule {
|
|
||||||
ip_protocol = "icmp"
|
|
||||||
from_port = "-1"
|
|
||||||
to_port = "-1"
|
|
||||||
cidr = "0.0.0.0/0"
|
|
||||||
}
|
|
||||||
rule {
|
|
||||||
ip_protocol = "tcp"
|
|
||||||
from_port = "1"
|
|
||||||
to_port = "65535"
|
|
||||||
self = true
|
|
||||||
}
|
|
||||||
rule {
|
|
||||||
ip_protocol = "udp"
|
|
||||||
from_port = "1"
|
|
||||||
to_port = "65535"
|
|
||||||
self = true
|
|
||||||
}
|
|
||||||
rule {
|
|
||||||
ip_protocol = "icmp"
|
|
||||||
from_port = "-1"
|
|
||||||
to_port = "-1"
|
|
||||||
self = true
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_instance_v2" "k8s_master" {
|
output "router_id" {
|
||||||
name = "${var.cluster_name}-k8s-master-${count.index+1}"
|
value = "${module.network.router_id}"
|
||||||
count = "${var.number_of_k8s_masters}"
|
|
||||||
image_name = "${var.image}"
|
|
||||||
flavor_id = "${var.flavor_k8s_master}"
|
|
||||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
|
||||||
network {
|
|
||||||
name = "${var.network_name}"
|
|
||||||
}
|
|
||||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
|
||||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
|
||||||
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index)}"
|
|
||||||
metadata = {
|
|
||||||
ssh_user = "${var.ssh_user}"
|
|
||||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault"
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
output "k8s_master_fips" {
|
||||||
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
value = "${module.ips.k8s_master_fips}"
|
||||||
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
|
|
||||||
count = "${var.number_of_k8s_masters_no_floating_ip}"
|
|
||||||
image_name = "${var.image}"
|
|
||||||
flavor_id = "${var.flavor_k8s_master}"
|
|
||||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
|
||||||
network {
|
|
||||||
name = "${var.network_name}"
|
|
||||||
}
|
|
||||||
security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
|
|
||||||
"${openstack_compute_secgroup_v2.k8s.name}" ]
|
|
||||||
metadata = {
|
|
||||||
ssh_user = "${var.ssh_user}"
|
|
||||||
kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault,no-floating"
|
|
||||||
}
|
|
||||||
provisioner "local-exec" {
|
|
||||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_instance_v2" "k8s_node" {
|
output "k8s_node_fips" {
|
||||||
name = "${var.cluster_name}-k8s-node-${count.index+1}"
|
value = "${module.ips.k8s_node_fips}"
|
||||||
count = "${var.number_of_k8s_nodes}"
|
|
||||||
image_name = "${var.image}"
|
|
||||||
flavor_id = "${var.flavor_k8s_node}"
|
|
||||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
|
||||||
network {
|
|
||||||
name = "${var.network_name}"
|
|
||||||
}
|
|
||||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
|
||||||
floating_ip = "${element(openstack_networking_floatingip_v2.k8s_node.*.address, count.index)}"
|
|
||||||
metadata = {
|
|
||||||
ssh_user = "${var.ssh_user}"
|
|
||||||
kubespray_groups = "kube-node,k8s-cluster,vault"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
output "bastion_fips" {
|
||||||
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
|
value = "${module.ips.bastion_fips}"
|
||||||
count = "${var.number_of_k8s_nodes_no_floating_ip}"
|
|
||||||
image_name = "${var.image}"
|
|
||||||
flavor_id = "${var.flavor_k8s_node}"
|
|
||||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
|
||||||
network {
|
|
||||||
name = "${var.network_name}"
|
|
||||||
}
|
|
||||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
|
||||||
metadata = {
|
|
||||||
ssh_user = "${var.ssh_user}"
|
|
||||||
kubespray_groups = "kube-node,k8s-cluster,vault,no-floating"
|
|
||||||
}
|
|
||||||
provisioner "local-exec" {
|
|
||||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
|
||||||
name = "${var.cluster_name}-gfs-nephe-vol-${count.index+1}"
|
|
||||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
|
||||||
description = "Non-ephemeral volume for GlusterFS"
|
|
||||||
size = "${var.gfs_volume_size_in_gb}"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
|
||||||
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
|
|
||||||
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
|
||||||
image_name = "${var.image_gfs}"
|
|
||||||
flavor_id = "${var.flavor_gfs_node}"
|
|
||||||
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
|
||||||
network {
|
|
||||||
name = "${var.network_name}"
|
|
||||||
}
|
|
||||||
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}" ]
|
|
||||||
metadata = {
|
|
||||||
ssh_user = "${var.ssh_user_gfs}"
|
|
||||||
kubespray_groups = "gfs-cluster,network-storage"
|
|
||||||
}
|
|
||||||
volume {
|
|
||||||
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
|
|
||||||
}
|
|
||||||
provisioner "local-exec" {
|
|
||||||
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/gfs-cluster.yml"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#output "msg" {
|
|
||||||
# value = "Your hosts are ready to go!\nYour ssh hosts are: ${join(", ", openstack_networking_floatingip_v2.k8s_master.*.address )}"
|
|
||||||
#}
|
|
||||||
|
|||||||
306
contrib/terraform/openstack/modules/compute/main.tf
Normal file
306
contrib/terraform/openstack/modules/compute/main.tf
Normal file
@@ -0,0 +1,306 @@
|
|||||||
|
resource "openstack_compute_keypair_v2" "k8s" {
|
||||||
|
name = "kubernetes-${var.cluster_name}"
|
||||||
|
public_key = "${chomp(file(var.public_key_path))}"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_secgroup_v2" "k8s_master" {
|
||||||
|
name = "${var.cluster_name}-k8s-master"
|
||||||
|
description = "${var.cluster_name} - Kubernetes Master"
|
||||||
|
|
||||||
|
rule {
|
||||||
|
ip_protocol = "tcp"
|
||||||
|
from_port = "6443"
|
||||||
|
to_port = "6443"
|
||||||
|
cidr = "0.0.0.0/0"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_secgroup_v2" "bastion" {
|
||||||
|
name = "${var.cluster_name}-bastion"
|
||||||
|
description = "${var.cluster_name} - Bastion Server"
|
||||||
|
|
||||||
|
rule {
|
||||||
|
ip_protocol = "tcp"
|
||||||
|
from_port = "22"
|
||||||
|
to_port = "22"
|
||||||
|
cidr = "0.0.0.0/0"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_secgroup_v2" "k8s" {
|
||||||
|
name = "${var.cluster_name}-k8s"
|
||||||
|
description = "${var.cluster_name} - Kubernetes"
|
||||||
|
|
||||||
|
rule {
|
||||||
|
ip_protocol = "icmp"
|
||||||
|
from_port = "-1"
|
||||||
|
to_port = "-1"
|
||||||
|
cidr = "0.0.0.0/0"
|
||||||
|
}
|
||||||
|
|
||||||
|
rule {
|
||||||
|
ip_protocol = "tcp"
|
||||||
|
from_port = "1"
|
||||||
|
to_port = "65535"
|
||||||
|
self = true
|
||||||
|
}
|
||||||
|
|
||||||
|
rule {
|
||||||
|
ip_protocol = "udp"
|
||||||
|
from_port = "1"
|
||||||
|
to_port = "65535"
|
||||||
|
self = true
|
||||||
|
}
|
||||||
|
|
||||||
|
rule {
|
||||||
|
ip_protocol = "icmp"
|
||||||
|
from_port = "-1"
|
||||||
|
to_port = "-1"
|
||||||
|
self = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "bastion" {
|
||||||
|
name = "${var.cluster_name}-bastion-${count.index+1}"
|
||||||
|
count = "${var.number_of_bastions}"
|
||||||
|
image_name = "${var.image}"
|
||||||
|
flavor_id = "${var.flavor_bastion}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
|
||||||
|
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
|
||||||
|
"${openstack_compute_secgroup_v2.bastion.name}",
|
||||||
|
"default",
|
||||||
|
]
|
||||||
|
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user}"
|
||||||
|
kubespray_groups = "bastion"
|
||||||
|
depends_on = "${var.network_id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
provisioner "local-exec" {
|
||||||
|
command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${var.bastion_fips[0]}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "k8s_master" {
|
||||||
|
name = "${var.cluster_name}-k8s-master-${count.index+1}"
|
||||||
|
count = "${var.number_of_k8s_masters}"
|
||||||
|
image_name = "${var.image}"
|
||||||
|
flavor_id = "${var.flavor_k8s_master}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
|
||||||
|
security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||||
|
"${openstack_compute_secgroup_v2.bastion.name}",
|
||||||
|
"${openstack_compute_secgroup_v2.k8s.name}",
|
||||||
|
"default",
|
||||||
|
]
|
||||||
|
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user}"
|
||||||
|
kubespray_groups = "etcd,kube-master,k8s-cluster,vault"
|
||||||
|
depends_on = "${var.network_id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "k8s_master_no_etcd" {
|
||||||
|
name = "${var.cluster_name}-k8s-master-ne-${count.index+1}"
|
||||||
|
count = "${var.number_of_k8s_masters_no_etcd}"
|
||||||
|
image_name = "${var.image}"
|
||||||
|
flavor_id = "${var.flavor_k8s_master}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
|
||||||
|
security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||||
|
"${openstack_compute_secgroup_v2.k8s.name}",
|
||||||
|
]
|
||||||
|
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user}"
|
||||||
|
kubespray_groups = "kube-master,k8s-cluster,vault"
|
||||||
|
depends_on = "${var.network_id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "etcd" {
|
||||||
|
name = "${var.cluster_name}-etcd-${count.index+1}"
|
||||||
|
count = "${var.number_of_etcd}"
|
||||||
|
image_name = "${var.image}"
|
||||||
|
flavor_id = "${var.flavor_etcd}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
|
||||||
|
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}"]
|
||||||
|
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user}"
|
||||||
|
kubespray_groups = "etcd,vault,no-floating"
|
||||||
|
depends_on = "${var.network_id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
|
||||||
|
name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
|
||||||
|
count = "${var.number_of_k8s_masters_no_floating_ip}"
|
||||||
|
image_name = "${var.image}"
|
||||||
|
flavor_id = "${var.flavor_k8s_master}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
|
||||||
|
security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||||
|
"${openstack_compute_secgroup_v2.k8s.name}",
|
||||||
|
"default",
|
||||||
|
]
|
||||||
|
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user}"
|
||||||
|
kubespray_groups = "etcd,kube-master,k8s-cluster,vault,no-floating"
|
||||||
|
depends_on = "${var.network_id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip_no_etcd" {
|
||||||
|
name = "${var.cluster_name}-k8s-master-ne-nf-${count.index+1}"
|
||||||
|
count = "${var.number_of_k8s_masters_no_floating_ip_no_etcd}"
|
||||||
|
image_name = "${var.image}"
|
||||||
|
flavor_id = "${var.flavor_k8s_master}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
|
||||||
|
security_groups = ["${openstack_compute_secgroup_v2.k8s_master.name}",
|
||||||
|
"${openstack_compute_secgroup_v2.k8s.name}",
|
||||||
|
]
|
||||||
|
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user}"
|
||||||
|
kubespray_groups = "kube-master,k8s-cluster,vault,no-floating"
|
||||||
|
depends_on = "${var.network_id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "k8s_node" {
|
||||||
|
name = "${var.cluster_name}-k8s-node-${count.index+1}"
|
||||||
|
count = "${var.number_of_k8s_nodes}"
|
||||||
|
image_name = "${var.image}"
|
||||||
|
flavor_id = "${var.flavor_k8s_node}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
|
||||||
|
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
|
||||||
|
"${openstack_compute_secgroup_v2.bastion.name}",
|
||||||
|
"default",
|
||||||
|
]
|
||||||
|
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user}"
|
||||||
|
kubespray_groups = "kube-node,k8s-cluster"
|
||||||
|
depends_on = "${var.network_id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
|
||||||
|
name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
|
||||||
|
count = "${var.number_of_k8s_nodes_no_floating_ip}"
|
||||||
|
image_name = "${var.image}"
|
||||||
|
flavor_id = "${var.flavor_k8s_node}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
|
||||||
|
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
|
||||||
|
"default",
|
||||||
|
]
|
||||||
|
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user}"
|
||||||
|
kubespray_groups = "kube-node,k8s-cluster,no-floating"
|
||||||
|
depends_on = "${var.network_id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_floatingip_associate_v2" "bastion" {
|
||||||
|
count = "${var.number_of_bastions}"
|
||||||
|
floating_ip = "${var.bastion_fips[count.index]}"
|
||||||
|
instance_id = "${element(openstack_compute_instance_v2.bastion.*.id, count.index)}"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_floatingip_associate_v2" "k8s_master" {
|
||||||
|
count = "${var.number_of_k8s_masters}"
|
||||||
|
instance_id = "${element(openstack_compute_instance_v2.k8s_master.*.id, count.index)}"
|
||||||
|
floating_ip = "${var.k8s_master_fips[count.index]}"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_floatingip_associate_v2" "k8s_node" {
|
||||||
|
count = "${var.number_of_k8s_nodes}"
|
||||||
|
floating_ip = "${var.k8s_node_fips[count.index]}"
|
||||||
|
instance_id = "${element(openstack_compute_instance_v2.k8s_node.*.id, count.index)}"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
|
||||||
|
name = "${var.cluster_name}-glusterfs_volume-${count.index+1}"
|
||||||
|
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||||
|
description = "Non-ephemeral volume for GlusterFS"
|
||||||
|
size = "${var.gfs_volume_size_in_gb}"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
|
||||||
|
name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
|
||||||
|
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||||
|
image_name = "${var.image_gfs}"
|
||||||
|
flavor_id = "${var.flavor_gfs_node}"
|
||||||
|
key_pair = "${openstack_compute_keypair_v2.k8s.name}"
|
||||||
|
|
||||||
|
network {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
}
|
||||||
|
|
||||||
|
security_groups = ["${openstack_compute_secgroup_v2.k8s.name}",
|
||||||
|
"default",
|
||||||
|
]
|
||||||
|
|
||||||
|
metadata = {
|
||||||
|
ssh_user = "${var.ssh_user_gfs}"
|
||||||
|
kubespray_groups = "gfs-cluster,network-storage,no-floating"
|
||||||
|
depends_on = "${var.network_id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_compute_volume_attach_v2" "glusterfs_volume" {
|
||||||
|
count = "${var.number_of_gfs_nodes_no_floating_ip}"
|
||||||
|
instance_id = "${element(openstack_compute_instance_v2.glusterfs_node_no_floating_ip.*.id, count.index)}"
|
||||||
|
volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
|
||||||
|
}
|
||||||
57
contrib/terraform/openstack/modules/compute/variables.tf
Normal file
57
contrib/terraform/openstack/modules/compute/variables.tf
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
variable "cluster_name" {}
|
||||||
|
|
||||||
|
variable "number_of_k8s_masters" {}
|
||||||
|
|
||||||
|
variable "number_of_k8s_masters_no_etcd" {}
|
||||||
|
|
||||||
|
variable "number_of_etcd" {}
|
||||||
|
|
||||||
|
variable "number_of_k8s_masters_no_floating_ip" {}
|
||||||
|
|
||||||
|
variable "number_of_k8s_masters_no_floating_ip_no_etcd" {}
|
||||||
|
|
||||||
|
variable "number_of_k8s_nodes" {}
|
||||||
|
|
||||||
|
variable "number_of_k8s_nodes_no_floating_ip" {}
|
||||||
|
|
||||||
|
variable "number_of_bastions" {}
|
||||||
|
|
||||||
|
variable "number_of_gfs_nodes_no_floating_ip" {}
|
||||||
|
|
||||||
|
variable "gfs_volume_size_in_gb" {}
|
||||||
|
|
||||||
|
variable "public_key_path" {}
|
||||||
|
|
||||||
|
variable "image" {}
|
||||||
|
|
||||||
|
variable "image_gfs" {}
|
||||||
|
|
||||||
|
variable "ssh_user" {}
|
||||||
|
|
||||||
|
variable "ssh_user_gfs" {}
|
||||||
|
|
||||||
|
variable "flavor_k8s_master" {}
|
||||||
|
|
||||||
|
variable "flavor_k8s_node" {}
|
||||||
|
|
||||||
|
variable "flavor_etcd" {}
|
||||||
|
|
||||||
|
variable "flavor_gfs_node" {}
|
||||||
|
|
||||||
|
variable "network_name" {}
|
||||||
|
|
||||||
|
variable "flavor_bastion" {}
|
||||||
|
|
||||||
|
variable "network_id" {}
|
||||||
|
|
||||||
|
variable "k8s_master_fips" {
|
||||||
|
type = "list"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "k8s_node_fips" {
|
||||||
|
type = "list"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "bastion_fips" {
|
||||||
|
type = "list"
|
||||||
|
}
|
||||||
23
contrib/terraform/openstack/modules/ips/main.tf
Normal file
23
contrib/terraform/openstack/modules/ips/main.tf
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
resource "null_resource" "dummy_dependency" {
|
||||||
|
triggers {
|
||||||
|
dependency_id = "${var.router_id}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_floatingip_v2" "k8s_master" {
|
||||||
|
count = "${var.number_of_k8s_masters}"
|
||||||
|
pool = "${var.floatingip_pool}"
|
||||||
|
depends_on = ["null_resource.dummy_dependency"]
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_floatingip_v2" "k8s_node" {
|
||||||
|
count = "${var.number_of_k8s_nodes}"
|
||||||
|
pool = "${var.floatingip_pool}"
|
||||||
|
depends_on = ["null_resource.dummy_dependency"]
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_floatingip_v2" "bastion" {
|
||||||
|
count = "${var.number_of_bastions}"
|
||||||
|
pool = "${var.floatingip_pool}"
|
||||||
|
depends_on = ["null_resource.dummy_dependency"]
|
||||||
|
}
|
||||||
11
contrib/terraform/openstack/modules/ips/outputs.tf
Normal file
11
contrib/terraform/openstack/modules/ips/outputs.tf
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
output "k8s_master_fips" {
|
||||||
|
value = ["${openstack_networking_floatingip_v2.k8s_master.*.address}"]
|
||||||
|
}
|
||||||
|
|
||||||
|
output "k8s_node_fips" {
|
||||||
|
value = ["${openstack_networking_floatingip_v2.k8s_node.*.address}"]
|
||||||
|
}
|
||||||
|
|
||||||
|
output "bastion_fips" {
|
||||||
|
value = ["${openstack_networking_floatingip_v2.bastion.*.address}"]
|
||||||
|
}
|
||||||
15
contrib/terraform/openstack/modules/ips/variables.tf
Normal file
15
contrib/terraform/openstack/modules/ips/variables.tf
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
variable "number_of_k8s_masters" {}
|
||||||
|
|
||||||
|
variable "number_of_k8s_masters_no_etcd" {}
|
||||||
|
|
||||||
|
variable "number_of_k8s_nodes" {}
|
||||||
|
|
||||||
|
variable "floatingip_pool" {}
|
||||||
|
|
||||||
|
variable "number_of_bastions" {}
|
||||||
|
|
||||||
|
variable "external_net" {}
|
||||||
|
|
||||||
|
variable "network_name" {}
|
||||||
|
|
||||||
|
variable "router_id" {}
|
||||||
23
contrib/terraform/openstack/modules/network/main.tf
Normal file
23
contrib/terraform/openstack/modules/network/main.tf
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
resource "openstack_networking_router_v2" "k8s" {
|
||||||
|
name = "${var.cluster_name}-router"
|
||||||
|
admin_state_up = "true"
|
||||||
|
external_network_id = "${var.external_net}"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_network_v2" "k8s" {
|
||||||
|
name = "${var.network_name}"
|
||||||
|
admin_state_up = "true"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_subnet_v2" "k8s" {
|
||||||
|
name = "${var.cluster_name}-internal-network"
|
||||||
|
network_id = "${openstack_networking_network_v2.k8s.id}"
|
||||||
|
cidr = "10.0.0.0/24"
|
||||||
|
ip_version = 4
|
||||||
|
dns_nameservers = "${var.dns_nameservers}"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "openstack_networking_router_interface_v2" "k8s" {
|
||||||
|
router_id = "${openstack_networking_router_v2.k8s.id}"
|
||||||
|
subnet_id = "${openstack_networking_subnet_v2.k8s.id}"
|
||||||
|
}
|
||||||
7
contrib/terraform/openstack/modules/network/outputs.tf
Normal file
7
contrib/terraform/openstack/modules/network/outputs.tf
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
output "router_id" {
|
||||||
|
value = "${openstack_networking_router_interface_v2.k8s.id}"
|
||||||
|
}
|
||||||
|
|
||||||
|
output "subnet_id" {
|
||||||
|
value = "${openstack_networking_subnet_v2.k8s.id}"
|
||||||
|
}
|
||||||
9
contrib/terraform/openstack/modules/network/variables.tf
Normal file
9
contrib/terraform/openstack/modules/network/variables.tf
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
variable "external_net" {}
|
||||||
|
|
||||||
|
variable "network_name" {}
|
||||||
|
|
||||||
|
variable "cluster_name" {}
|
||||||
|
|
||||||
|
variable "dns_nameservers" {
|
||||||
|
type = "list"
|
||||||
|
}
|
||||||
45
contrib/terraform/openstack/sample-inventory/cluster.tf
Normal file
45
contrib/terraform/openstack/sample-inventory/cluster.tf
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
# your Kubernetes cluster name here
|
||||||
|
cluster_name = "i-didnt-read-the-docs"
|
||||||
|
|
||||||
|
# SSH key to use for access to nodes
|
||||||
|
public_key_path = "~/.ssh/id_rsa.pub"
|
||||||
|
|
||||||
|
# image to use for bastion, masters, standalone etcd instances, and nodes
|
||||||
|
image = "<image name>"
|
||||||
|
# user on the node (ex. core on Container Linux, ubuntu on Ubuntu, etc.)
|
||||||
|
ssh_user = "<cloud-provisioned user>"
|
||||||
|
|
||||||
|
# 0|1 bastion nodes
|
||||||
|
number_of_bastions = 0
|
||||||
|
#flavor_bastion = "<UUID>"
|
||||||
|
|
||||||
|
# standalone etcds
|
||||||
|
number_of_etcd = 0
|
||||||
|
|
||||||
|
# masters
|
||||||
|
number_of_k8s_masters = 1
|
||||||
|
number_of_k8s_masters_no_etcd = 0
|
||||||
|
number_of_k8s_masters_no_floating_ip = 0
|
||||||
|
number_of_k8s_masters_no_floating_ip_no_etcd = 0
|
||||||
|
flavor_k8s_master = "<UUID>"
|
||||||
|
|
||||||
|
# nodes
|
||||||
|
number_of_k8s_nodes = 2
|
||||||
|
number_of_k8s_nodes_no_floating_ip = 4
|
||||||
|
#flavor_k8s_node = "<UUID>"
|
||||||
|
|
||||||
|
# GlusterFS
|
||||||
|
# either 0 or more than one
|
||||||
|
#number_of_gfs_nodes_no_floating_ip = 0
|
||||||
|
#gfs_volume_size_in_gb = 150
|
||||||
|
# Container Linux does not support GlusterFS
|
||||||
|
#image_gfs = "<image name>"
|
||||||
|
# May be different from other nodes
|
||||||
|
#ssh_user_gfs = "ubuntu"
|
||||||
|
#flavor_gfs_node = "<UUID>"
|
||||||
|
|
||||||
|
# networking
|
||||||
|
network_name = "<network>"
|
||||||
|
external_net = "<UUID>"
|
||||||
|
floatingip_pool = "<pool>"
|
||||||
|
|
||||||
1
contrib/terraform/openstack/sample-inventory/group_vars
Symbolic link
1
contrib/terraform/openstack/sample-inventory/group_vars
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../../../../inventory/sample/group_vars
|
||||||
@@ -2,14 +2,30 @@ variable "cluster_name" {
|
|||||||
default = "example"
|
default = "example"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "number_of_bastions" {
|
||||||
|
default = 1
|
||||||
|
}
|
||||||
|
|
||||||
variable "number_of_k8s_masters" {
|
variable "number_of_k8s_masters" {
|
||||||
default = 2
|
default = 2
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "number_of_k8s_masters_no_etcd" {
|
||||||
|
default = 2
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "number_of_etcd" {
|
||||||
|
default = 2
|
||||||
|
}
|
||||||
|
|
||||||
variable "number_of_k8s_masters_no_floating_ip" {
|
variable "number_of_k8s_masters_no_floating_ip" {
|
||||||
default = 2
|
default = 2
|
||||||
}
|
}
|
||||||
|
|
||||||
|
variable "number_of_k8s_masters_no_floating_ip_no_etcd" {
|
||||||
|
default = 2
|
||||||
|
}
|
||||||
|
|
||||||
variable "number_of_k8s_nodes" {
|
variable "number_of_k8s_nodes" {
|
||||||
default = 1
|
default = 1
|
||||||
}
|
}
|
||||||
@@ -28,63 +44,70 @@ variable "gfs_volume_size_in_gb" {
|
|||||||
|
|
||||||
variable "public_key_path" {
|
variable "public_key_path" {
|
||||||
description = "The path of the ssh pub key"
|
description = "The path of the ssh pub key"
|
||||||
default = "~/.ssh/id_rsa.pub"
|
default = "~/.ssh/id_rsa.pub"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "image" {
|
variable "image" {
|
||||||
description = "the image to use"
|
description = "the image to use"
|
||||||
default = "ubuntu-14.04"
|
default = "ubuntu-14.04"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "image_gfs" {
|
variable "image_gfs" {
|
||||||
description = "Glance image to use for GlusterFS"
|
description = "Glance image to use for GlusterFS"
|
||||||
default = "ubuntu-16.04"
|
default = "ubuntu-16.04"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "ssh_user" {
|
variable "ssh_user" {
|
||||||
description = "used to fill out tags for ansible inventory"
|
description = "used to fill out tags for ansible inventory"
|
||||||
default = "ubuntu"
|
default = "ubuntu"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "ssh_user_gfs" {
|
variable "ssh_user_gfs" {
|
||||||
description = "used to fill out tags for ansible inventory"
|
description = "used to fill out tags for ansible inventory"
|
||||||
default = "ubuntu"
|
default = "ubuntu"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "flavor_bastion" {
|
||||||
|
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||||
|
default = 3
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "flavor_k8s_master" {
|
variable "flavor_k8s_master" {
|
||||||
default = 3
|
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||||
|
default = 3
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "flavor_k8s_node" {
|
variable "flavor_k8s_node" {
|
||||||
default = 3
|
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||||
|
default = 3
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "flavor_etcd" {
|
||||||
|
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||||
|
default = 3
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "flavor_gfs_node" {
|
variable "flavor_gfs_node" {
|
||||||
default = 3
|
description = "Use 'nova flavor-list' command to see what your OpenStack instance uses for IDs"
|
||||||
|
default = 3
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "network_name" {
|
variable "network_name" {
|
||||||
description = "name of the internal network to use"
|
description = "name of the internal network to use"
|
||||||
default = "internal"
|
default = "internal"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "dns_nameservers" {
|
||||||
|
description = "An array of DNS name server names used by hosts in this subnet."
|
||||||
|
type = "list"
|
||||||
|
default = []
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "floatingip_pool" {
|
variable "floatingip_pool" {
|
||||||
description = "name of the floating ip pool to use"
|
description = "name of the floating ip pool to use"
|
||||||
default = "external"
|
default = "external"
|
||||||
}
|
}
|
||||||
|
|
||||||
variable "username" {
|
variable "external_net" {
|
||||||
description = "Your openstack username"
|
description = "uuid of the external/public network"
|
||||||
}
|
|
||||||
|
|
||||||
variable "password" {
|
|
||||||
description = "Your openstack password"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "tenant" {
|
|
||||||
description = "Your openstack tenant/project"
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "auth_url" {
|
|
||||||
description = "Your openstack auth URL"
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
#!/usr/bin/env python
|
#!/usr/bin/env python2
|
||||||
#
|
#
|
||||||
# Copyright 2015 Cisco Systems, Inc.
|
# Copyright 2015 Cisco Systems, Inc.
|
||||||
#
|
#
|
||||||
@@ -70,6 +70,14 @@ def iterhosts(resources):
|
|||||||
yield parser(resource, module_name)
|
yield parser(resource, module_name)
|
||||||
|
|
||||||
|
|
||||||
|
def iterips(resources):
|
||||||
|
'''yield ip tuples of (instance_id, ip)'''
|
||||||
|
for module_name, key, resource in resources:
|
||||||
|
resource_type, name = key.split('.', 1)
|
||||||
|
if resource_type == 'openstack_compute_floatingip_associate_v2':
|
||||||
|
yield openstack_floating_ips(resource)
|
||||||
|
|
||||||
|
|
||||||
def parses(prefix):
|
def parses(prefix):
|
||||||
def inner(func):
|
def inner(func):
|
||||||
PARSERS[prefix] = func
|
PARSERS[prefix] = func
|
||||||
@@ -298,6 +306,17 @@ def softlayer_host(resource, module_name):
|
|||||||
|
|
||||||
return name, attrs, groups
|
return name, attrs, groups
|
||||||
|
|
||||||
|
def openstack_floating_ips(resource):
|
||||||
|
raw_attrs = resource['primary']['attributes']
|
||||||
|
attrs = {
|
||||||
|
'ip': raw_attrs['floating_ip'],
|
||||||
|
'instance_id': raw_attrs['instance_id'],
|
||||||
|
}
|
||||||
|
return attrs
|
||||||
|
|
||||||
|
def openstack_floating_ips(resource):
|
||||||
|
raw_attrs = resource['primary']['attributes']
|
||||||
|
return raw_attrs['instance_id'], raw_attrs['floating_ip']
|
||||||
|
|
||||||
@parses('openstack_compute_instance_v2')
|
@parses('openstack_compute_instance_v2')
|
||||||
@calculate_mantl_vars
|
@calculate_mantl_vars
|
||||||
@@ -343,6 +362,8 @@ def openstack_host(resource, module_name):
|
|||||||
except (KeyError, ValueError):
|
except (KeyError, ValueError):
|
||||||
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
|
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
|
||||||
|
|
||||||
|
# Handling of floating IPs has changed: https://github.com/terraform-providers/terraform-provider-openstack/blob/master/CHANGELOG.md#010-june-21-2017
|
||||||
|
|
||||||
# attrs specific to Ansible
|
# attrs specific to Ansible
|
||||||
if 'metadata.ssh_user' in raw_attrs:
|
if 'metadata.ssh_user' in raw_attrs:
|
||||||
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
|
attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
|
||||||
@@ -656,6 +677,19 @@ def clc_server(resource, module_name):
|
|||||||
return name, attrs, groups
|
return name, attrs, groups
|
||||||
|
|
||||||
|
|
||||||
|
def iter_host_ips(hosts, ips):
|
||||||
|
'''Update hosts that have an entry in the floating IP list'''
|
||||||
|
for host in hosts:
|
||||||
|
host_id = host[1]['id']
|
||||||
|
if host_id in ips:
|
||||||
|
ip = ips[host_id]
|
||||||
|
host[1].update({
|
||||||
|
'access_ip_v4': ip,
|
||||||
|
'public_ipv4': ip,
|
||||||
|
'ansible_ssh_host': ip,
|
||||||
|
})
|
||||||
|
yield host
|
||||||
|
|
||||||
|
|
||||||
## QUERY TYPES
|
## QUERY TYPES
|
||||||
def query_host(hosts, target):
|
def query_host(hosts, target):
|
||||||
@@ -727,6 +761,13 @@ def main():
|
|||||||
parser.exit()
|
parser.exit()
|
||||||
|
|
||||||
hosts = iterhosts(iterresources(tfstates(args.root)))
|
hosts = iterhosts(iterresources(tfstates(args.root)))
|
||||||
|
|
||||||
|
# Perform a second pass on the file to pick up floating_ip entries to update the ip address of referenced hosts
|
||||||
|
ips = dict(iterips(iterresources(tfstates(args.root))))
|
||||||
|
|
||||||
|
if ips:
|
||||||
|
hosts = iter_host_ips(hosts, ips)
|
||||||
|
|
||||||
if args.list:
|
if args.list:
|
||||||
output = query_list(hosts)
|
output = query_list(hosts)
|
||||||
if args.nometa:
|
if args.nometa:
|
||||||
|
|||||||
@@ -27,7 +27,7 @@ not _kube-node_.
|
|||||||
|
|
||||||
There are also two special groups:
|
There are also two special groups:
|
||||||
|
|
||||||
* **calico-rr** : explained for [advanced Calico networking cases](docs/calico.md)
|
* **calico-rr** : explained for [advanced Calico networking cases](calico.md)
|
||||||
* **bastion** : configure a bastion host if your nodes are not directly reachable
|
* **bastion** : configure a bastion host if your nodes are not directly reachable
|
||||||
|
|
||||||
Below is a complete inventory example:
|
Below is a complete inventory example:
|
||||||
@@ -66,34 +66,34 @@ kube-master
|
|||||||
Group vars and overriding variables precedence
|
Group vars and overriding variables precedence
|
||||||
----------------------------------------------
|
----------------------------------------------
|
||||||
|
|
||||||
The group variables to control main deployment options are located in the directory ``inventory/group_vars``.
|
The group variables to control main deployment options are located in the directory ``inventory/sample/group_vars``.
|
||||||
Optional variables are located in the `inventory/group_vars/all.yml`.
|
Optional variables are located in the `inventory/sample/group_vars/all.yml`.
|
||||||
Mandatory variables that are common for at least one role (or a node group) can be found in the
|
Mandatory variables that are common for at least one role (or a node group) can be found in the
|
||||||
`inventory/group_vars/k8s-cluster.yml`.
|
`inventory/sample/group_vars/k8s-cluster.yml`.
|
||||||
There are also role vars for docker, rkt, kubernetes preinstall and master roles.
|
There are also role vars for docker, rkt, kubernetes preinstall and master roles.
|
||||||
According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
|
According to the [ansible docs](http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable),
|
||||||
those cannot be overriden from the group vars. In order to override, one should use
|
those cannot be overriden from the group vars. In order to override, one should use
|
||||||
the `-e ` runtime flags (most simple way) or other layers described in the docs.
|
the `-e ` runtime flags (most simple way) or other layers described in the docs.
|
||||||
|
|
||||||
Kargo uses only a few layers to override things (or expect them to
|
Kubespray uses only a few layers to override things (or expect them to
|
||||||
be overriden for roles):
|
be overriden for roles):
|
||||||
|
|
||||||
Layer | Comment
|
Layer | Comment
|
||||||
------|--------
|
------|--------
|
||||||
**role defaults** | provides best UX to override things for Kargo deployments
|
**role defaults** | provides best UX to override things for Kubespray deployments
|
||||||
inventory vars | Unused
|
inventory vars | Unused
|
||||||
**inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things
|
**inventory group_vars** | Expects users to use ``all.yml``,``k8s-cluster.yml`` etc. to override things
|
||||||
inventory host_vars | Unused
|
inventory host_vars | Unused
|
||||||
playbook group_vars | Unuses
|
playbook group_vars | Unused
|
||||||
playbook host_vars | Unused
|
playbook host_vars | Unused
|
||||||
**host facts** | Kargo overrides for internal roles' logic, like state flags
|
**host facts** | Kubespray overrides for internal roles' logic, like state flags
|
||||||
play vars | Unused
|
play vars | Unused
|
||||||
play vars_prompt | Unused
|
play vars_prompt | Unused
|
||||||
play vars_files | Unused
|
play vars_files | Unused
|
||||||
registered vars | Unused
|
registered vars | Unused
|
||||||
set_facts | Kargo overrides those, for some places
|
set_facts | Kubespray overrides those, for some places
|
||||||
**role and include vars** | Provides bad UX to override things! Use extra vars to enforce
|
**role and include vars** | Provides bad UX to override things! Use extra vars to enforce
|
||||||
block vars (only for tasks in block) | Kargo overrides for internal roles' logic
|
block vars (only for tasks in block) | Kubespray overrides for internal roles' logic
|
||||||
task vars (only for the task) | Unused for roles, but only for helper scripts
|
task vars (only for the task) | Unused for roles, but only for helper scripts
|
||||||
**extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml``
|
**extra vars** (always win precedence) | override with ``ansible-playbook -e @foo.yml``
|
||||||
|
|
||||||
@@ -124,12 +124,12 @@ The following tags are defined in playbooks:
|
|||||||
| k8s-pre-upgrade | Upgrading K8s cluster
|
| k8s-pre-upgrade | Upgrading K8s cluster
|
||||||
| k8s-secrets | Configuring K8s certs/keys
|
| k8s-secrets | Configuring K8s certs/keys
|
||||||
| kpm | Installing K8s apps definitions with KPM
|
| kpm | Installing K8s apps definitions with KPM
|
||||||
| kube-apiserver | Configuring self-hosted kube-apiserver
|
| kube-apiserver | Configuring static pod kube-apiserver
|
||||||
| kube-controller-manager | Configuring self-hosted kube-controller-manager
|
| kube-controller-manager | Configuring static pod kube-controller-manager
|
||||||
| kubectl | Installing kubectl and bash completion
|
| kubectl | Installing kubectl and bash completion
|
||||||
| kubelet | Configuring kubelet service
|
| kubelet | Configuring kubelet service
|
||||||
| kube-proxy | Configuring self-hosted kube-proxy
|
| kube-proxy | Configuring static pod kube-proxy
|
||||||
| kube-scheduler | Configuring self-hosted kube-scheduler
|
| kube-scheduler | Configuring static pod kube-scheduler
|
||||||
| localhost | Special steps for the localhost (ansible runner)
|
| localhost | Special steps for the localhost (ansible runner)
|
||||||
| master | Configuring K8s master node role
|
| master | Configuring K8s master node role
|
||||||
| netchecker | Installing netchecker K8s app
|
| netchecker | Installing netchecker K8s app
|
||||||
@@ -153,16 +153,16 @@ Example command to filter and apply only DNS configuration tasks and skip
|
|||||||
everything else related to host OS configuration and downloading images of containers:
|
everything else related to host OS configuration and downloading images of containers:
|
||||||
|
|
||||||
```
|
```
|
||||||
ansible-playbook -i inventory/inventory.ini cluster.yml --tags preinstall,dnsmasq,facts --skip-tags=download,bootstrap-os
|
ansible-playbook -i inventory/sample/hosts.ini cluster.yml --tags preinstall,dnsmasq,facts --skip-tags=download,bootstrap-os
|
||||||
```
|
```
|
||||||
And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:
|
And this play only removes the K8s cluster DNS resolver IP from hosts' /etc/resolv.conf files:
|
||||||
```
|
```
|
||||||
ansible-playbook -i inventory/inventory.ini -e dns_server='' cluster.yml --tags resolvconf
|
ansible-playbook -i inventory/sample/hosts.ini -e dnsmasq_dns_server='' cluster.yml --tags resolvconf
|
||||||
```
|
```
|
||||||
And this prepares all container images localy (at the ansible runner node) without installing
|
And this prepares all container images locally (at the ansible runner node) without installing
|
||||||
or upgrading related stuff or trying to upload container to K8s cluster nodes:
|
or upgrading related stuff or trying to upload container to K8s cluster nodes:
|
||||||
```
|
```
|
||||||
ansible-playbook -i inventory/inventory.ini cluster.yml \
|
ansible-playbook -i inventory/sample/hosts.ini cluster.yml \
|
||||||
-e download_run_once=true -e download_localhost=true \
|
-e download_run_once=true -e download_localhost=true \
|
||||||
--tags download --skip-tags upload,upgrade
|
--tags download --skip-tags upload,upgrade
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ Note: Flannel is the only plugin that has currently been tested with atomic
|
|||||||
|
|
||||||
### Vagrant
|
### Vagrant
|
||||||
|
|
||||||
* For bootstrapping with Vagrant, use box centos/atomic-host
|
* For bootstrapping with Vagrant, use box centos/atomic-host or fedora/atomic-host
|
||||||
* Update VagrantFile variable `local_release_dir` to `/var/vagrant/temp`.
|
* Update VagrantFile variable `local_release_dir` to `/var/vagrant/temp`.
|
||||||
* Update `vm_memory = 2048` and `vm_cpus = 2`
|
* Update `vm_memory = 2048` and `vm_cpus = 2`
|
||||||
* Networking on vagrant hosts has to be brought up manually once they are booted.
|
* Networking on vagrant hosts has to be brought up manually once they are booted.
|
||||||
@@ -17,6 +17,7 @@ Note: Flannel is the only plugin that has currently been tested with atomic
|
|||||||
sudo /sbin/ifup enp0s8
|
sudo /sbin/ifup enp0s8
|
||||||
```
|
```
|
||||||
|
|
||||||
* For users of vagrant-libvirt download qcow2 format from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/
|
* For users of vagrant-libvirt download centos/atomic-host qcow2 format from https://wiki.centos.org/SpecialInterestGroup/Atomic/Download/
|
||||||
|
* For users of vagrant-libvirt download fedora/atomic-host qcow2 format from https://getfedora.org/en/atomic/download/
|
||||||
|
|
||||||
Then you can proceed to [cluster deployment](#run-deployment)
|
Then you can proceed to [cluster deployment](#run-deployment)
|
||||||
|
|||||||
52
docs/aws.md
52
docs/aws.md
@@ -3,8 +3,58 @@ AWS
|
|||||||
|
|
||||||
To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
|
To deploy kubespray on [AWS](https://aws.amazon.com/) uncomment the `cloud_provider` option in `group_vars/all.yml` and set it to `'aws'`.
|
||||||
|
|
||||||
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kargo/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
|
Prior to creating your instances, you **must** ensure that you have created IAM roles and policies for both "kubernetes-master" and "kubernetes-node". You can find the IAM policies [here](https://github.com/kubernetes-incubator/kubespray/tree/master/contrib/aws_iam/). See the [IAM Documentation](https://aws.amazon.com/documentation/iam/) if guidance is needed on how to set these up. When you bring your instances online, associate them with the respective IAM role. Nodes that are only to be used for Etcd do not need a role.
|
||||||
|
|
||||||
|
You would also need to tag the resources in your VPC accordingly for the aws provider to utilize them. Tag the subnets and all instances that kubernetes will be run on with key `kubernetes.io/cluster/$cluster_name` (`$cluster_name` must be a unique identifier for the cluster). Tag the subnets that must be targeted by external ELBs with the key `kubernetes.io/role/elb` and internal ELBs with the key `kubernetes.io/role/internal-elb`.
|
||||||
|
|
||||||
|
Make sure your VPC has both DNS Hostnames support and Private DNS enabled.
|
||||||
|
|
||||||
The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
|
The next step is to make sure the hostnames in your `inventory` file are identical to your internal hostnames in AWS. This may look something like `ip-111-222-333-444.us-west-2.compute.internal`. You can then specify how Ansible connects to these instances with `ansible_ssh_host` and `ansible_ssh_user`.
|
||||||
|
|
||||||
You can now create your cluster!
|
You can now create your cluster!
|
||||||
|
|
||||||
|
### Dynamic Inventory ###
|
||||||
|
There is also a dynamic inventory script for AWS that can be used if desired. However, be aware that it makes certain assumptions about how you'll create your inventory. It also does not handle all use cases and groups that we may use as part of more advanced deployments. Additions welcome.
|
||||||
|
|
||||||
|
This will produce an inventory that is passed into Ansible that looks like the following:
|
||||||
|
```
|
||||||
|
{
|
||||||
|
"_meta": {
|
||||||
|
"hostvars": {
|
||||||
|
"ip-172-31-3-xxx.us-east-2.compute.internal": {
|
||||||
|
"ansible_ssh_host": "172.31.3.xxx"
|
||||||
|
},
|
||||||
|
"ip-172-31-8-xxx.us-east-2.compute.internal": {
|
||||||
|
"ansible_ssh_host": "172.31.8.xxx"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"etcd": [
|
||||||
|
"ip-172-31-3-xxx.us-east-2.compute.internal"
|
||||||
|
],
|
||||||
|
"k8s-cluster": {
|
||||||
|
"children": [
|
||||||
|
"kube-master",
|
||||||
|
"kube-node"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"kube-master": [
|
||||||
|
"ip-172-31-3-xxx.us-east-2.compute.internal"
|
||||||
|
],
|
||||||
|
"kube-node": [
|
||||||
|
"ip-172-31-8-xxx.us-east-2.compute.internal"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Guide:
|
||||||
|
- Create instances in AWS as needed.
|
||||||
|
- Either during or after creation, add tags to the instances with a key of `kubespray-role` and a value of `kube-master`, `etcd`, or `kube-node`. You can also share roles like `kube-master, etcd`
|
||||||
|
- Copy the `kubespray-aws-inventory.py` script from `kubespray/contrib/aws_inventory` to the `kubespray/inventory` directory.
|
||||||
|
- Set the following AWS credentials and info as environment variables in your terminal:
|
||||||
|
```
|
||||||
|
export AWS_ACCESS_KEY_ID="xxxxx"
|
||||||
|
export AWS_SECRET_ACCESS_KEY="yyyyy"
|
||||||
|
export REGION="us-east-2"
|
||||||
|
```
|
||||||
|
- We will now create our cluster. There will be either one or two small changes. The first is that we will specify `-i inventory/kubespray-aws-inventory.py` as our inventory script. The other is conditional. If your AWS instances are public facing, you can set the `VPC_VISIBILITY` variable to `public` and that will result in public IP and DNS names being passed into the inventory. This causes your cluster.yml command to look like `VPC_VISIBILITY="public" ansible-playbook ... cluster.yml`
|
||||||
|
|||||||
@@ -96,7 +96,7 @@ You need to edit your inventory and add:
|
|||||||
* `cluster_id` by route reflector node/group (see details
|
* `cluster_id` by route reflector node/group (see details
|
||||||
[here](https://hub.docker.com/r/calico/routereflector/))
|
[here](https://hub.docker.com/r/calico/routereflector/))
|
||||||
|
|
||||||
Here's an example of Kargo inventory with route reflectors:
|
Here's an example of Kubespray inventory with route reflectors:
|
||||||
|
|
||||||
```
|
```
|
||||||
[all]
|
[all]
|
||||||
@@ -145,11 +145,11 @@ cluster_id="1.0.0.1"
|
|||||||
The inventory above will deploy the following topology assuming that calico's
|
The inventory above will deploy the following topology assuming that calico's
|
||||||
`global_as_num` is set to `65400`:
|
`global_as_num` is set to `65400`:
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
##### Optional : Define default endpoint to host action
|
##### Optional : Define default endpoint to host action
|
||||||
|
|
||||||
By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kargo) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) withing the same node are dropped.
|
By default Calico blocks traffic from endpoints to the host itself by using an iptables DROP action. When using it in kubernetes the action has to be changed to RETURN (default in kubespray) or ACCEPT (see https://github.com/projectcalico/felix/issues/660 and https://github.com/projectcalico/calicoctl/issues/1389). Otherwise all network packets from pods (with hostNetwork=False) to services endpoints (with hostNetwork=True) within the same node are dropped.
|
||||||
|
|
||||||
|
|
||||||
To re-define default action please set the following variable in your inventory:
|
To re-define default action please set the following variable in your inventory:
|
||||||
@@ -161,3 +161,11 @@ Cloud providers configuration
|
|||||||
=============================
|
=============================
|
||||||
|
|
||||||
Please refer to the official documentation, for example [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for calico ip-ip tunnels. Note, calico is always configured with ``ipip: true`` if the cloud provider was defined.
|
Please refer to the official documentation, for example [GCE configuration](http://docs.projectcalico.org/v1.5/getting-started/docker/installation/gce) requires a security rule for calico ip-ip tunnels. Note, calico is always configured with ``ipip: true`` if the cloud provider was defined.
|
||||||
|
|
||||||
|
##### Optional : Ignore kernel's RPF check setting
|
||||||
|
|
||||||
|
By default the felix agent(calico-node) will abort if the Kernel RPF setting is not 'strict'. If you want Calico to ignore the Kernel setting:
|
||||||
|
|
||||||
|
```
|
||||||
|
calico_node_ignorelooserpf: true
|
||||||
|
```
|
||||||
|
|||||||
@@ -3,20 +3,11 @@ Cloud providers
|
|||||||
|
|
||||||
#### Provisioning
|
#### Provisioning
|
||||||
|
|
||||||
You can use kargo-cli to start new instances on cloud providers
|
You can deploy instances in your cloud environment in several different ways. Examples include Terraform, Ansible (ec2 and gce modules), and manual creation.
|
||||||
here's an example
|
|
||||||
```
|
|
||||||
kargo [aws|gce] --nodes 2 --etcd 3 --cluster-name test-smana
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Deploy kubernetes
|
#### Deploy kubernetes
|
||||||
|
|
||||||
With kargo-cli
|
With ansible-playbook command
|
||||||
```
|
|
||||||
kargo deploy [--aws|--gce] -u admin
|
|
||||||
```
|
|
||||||
|
|
||||||
Or ansible-playbook command
|
|
||||||
```
|
```
|
||||||
ansible-playbook -u smana -e ansible_ssh_user=admin -e cloud_provider=[aws|gce] -b --become-user=root -i inventory/single.cfg cluster.yml
|
ansible-playbook -u smana -e ansible_ssh_user=admin -e cloud_provider=[aws|gce] -b --become-user=root -i inventory/single.cfg cluster.yml
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -1,25 +1,25 @@
|
|||||||
Kargo vs [Kops](https://github.com/kubernetes/kops)
|
Kubespray vs [Kops](https://github.com/kubernetes/kops)
|
||||||
---------------
|
---------------
|
||||||
|
|
||||||
Kargo runs on bare metal and most clouds, using Ansible as its substrate for
|
Kubespray runs on bare metal and most clouds, using Ansible as its substrate for
|
||||||
provisioning and orchestration. Kops performs the provisioning and orchestration
|
provisioning and orchestration. Kops performs the provisioning and orchestration
|
||||||
itself, and as such is less flexible in deployment platforms. For people with
|
itself, and as such is less flexible in deployment platforms. For people with
|
||||||
familiarity with Ansible, existing Ansible deployments or the desire to run a
|
familiarity with Ansible, existing Ansible deployments or the desire to run a
|
||||||
Kubernetes cluster across multiple platforms, Kargo is a good choice. Kops,
|
Kubernetes cluster across multiple platforms, Kubespray is a good choice. Kops,
|
||||||
however, is more tightly integrated with the unique features of the clouds it
|
however, is more tightly integrated with the unique features of the clouds it
|
||||||
supports so it could be a better choice if you know that you will only be using
|
supports so it could be a better choice if you know that you will only be using
|
||||||
one platform for the foreseeable future.
|
one platform for the foreseeable future.
|
||||||
|
|
||||||
Kargo vs [Kubeadm](https://github.com/kubernetes/kubeadm)
|
Kubespray vs [Kubeadm](https://github.com/kubernetes/kubeadm)
|
||||||
------------------
|
------------------
|
||||||
|
|
||||||
Kubeadm provides domain Knowledge of Kubernetes clusters' life cycle
|
Kubeadm provides domain Knowledge of Kubernetes clusters' life cycle
|
||||||
management, including self-hosted layouts, dynamic discovery services and so
|
management, including self-hosted layouts, dynamic discovery services and so
|
||||||
on. Had it belong to the new [operators world](https://coreos.com/blog/introducing-operators.html),
|
on. Had it belonged to the new [operators world](https://coreos.com/blog/introducing-operators.html),
|
||||||
it would've likely been named a "Kubernetes cluster operator". Kargo however,
|
it may have been named a "Kubernetes cluster operator". Kubespray however,
|
||||||
does generic configuration management tasks from the "OS operators" ansible
|
does generic configuration management tasks from the "OS operators" ansible
|
||||||
world, plus some initial K8s clustering (with networking plugins included) and
|
world, plus some initial K8s clustering (with networking plugins included) and
|
||||||
control plane bootstrapping. Kargo [strives](https://github.com/kubernetes-incubator/kargo/issues/553)
|
control plane bootstrapping. Kubespray [strives](https://github.com/kubernetes-incubator/kubespray/issues/553)
|
||||||
to adopt kubeadm as a tool in order to consume life cycle management domain
|
to adopt kubeadm as a tool in order to consume life cycle management domain
|
||||||
knowledge from it and offload generic OS configuration things from it, which
|
knowledge from it and offload generic OS configuration things from it, which
|
||||||
hopefully benefits both sides.
|
hopefully benefits both sides.
|
||||||
|
|||||||
74
docs/contiv.md
Normal file
74
docs/contiv.md
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
Contiv
|
||||||
|
======
|
||||||
|
|
||||||
|
Here is the [Contiv documentation](http://contiv.github.io/documents/).
|
||||||
|
|
||||||
|
## Administrate Contiv
|
||||||
|
|
||||||
|
There are two ways to manage Contiv:
|
||||||
|
|
||||||
|
* a web UI managed by the api proxy service
|
||||||
|
* a CLI named `netctl`
|
||||||
|
|
||||||
|
|
||||||
|
### Interfaces
|
||||||
|
|
||||||
|
#### The Web Interface
|
||||||
|
|
||||||
|
This UI is hosted on all kubernetes master nodes. The service is available at `https://<one of your master node>:10000`.
|
||||||
|
|
||||||
|
You can configure the api proxy by overriding the following variables:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
contiv_enable_api_proxy: true
|
||||||
|
contiv_api_proxy_port: 10000
|
||||||
|
contiv_generate_certificate: true
|
||||||
|
```
|
||||||
|
|
||||||
|
The default credentials to log in are: admin/admin.
|
||||||
|
|
||||||
|
|
||||||
|
#### The Command Line Interface
|
||||||
|
|
||||||
|
The second way to modify the Contiv configuration is to use the CLI. To do this, you have to connect to the server and export an environment variable to tell netctl how to connect to the cluster:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export NETMASTER=http://127.0.0.1:9999
|
||||||
|
```
|
||||||
|
|
||||||
|
The port can be changed by overriding the following variable:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
contiv_netmaster_port: 9999
|
||||||
|
```
|
||||||
|
|
||||||
|
The CLI doesn't use the authentication process needed by the web interface.
|
||||||
|
|
||||||
|
|
||||||
|
### Network configuration
|
||||||
|
|
||||||
|
The default configuration uses VXLAN to create an overlay. Two networks are created by default:
|
||||||
|
|
||||||
|
* `contivh1`: an infrastructure network. It allows nodes to access the pods IPs. It is mandatory in a Kubernetes environment that uses VXLAN.
|
||||||
|
* `default-net` : the default network that hosts pods.
|
||||||
|
|
||||||
|
You can change the default network configuration by overriding the `contiv_networks` variable.
|
||||||
|
|
||||||
|
The default forward mode is set to routing:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
contiv_fwd_mode: routing
|
||||||
|
```
|
||||||
|
|
||||||
|
The following is an example of how you can use VLAN instead of VXLAN:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
contiv_fwd_mode: bridge
|
||||||
|
contiv_vlan_interface: eth0
|
||||||
|
contiv_networks:
|
||||||
|
- name: default-net
|
||||||
|
subnet: "{{ kube_pods_subnet }}"
|
||||||
|
gateway: "{{ kube_pods_subnet|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
|
||||||
|
encap: vlan
|
||||||
|
pkt_tag: 10
|
||||||
|
```
|
||||||
@@ -1,16 +1,14 @@
|
|||||||
CoreOS bootstrap
|
CoreOS bootstrap
|
||||||
===============
|
===============
|
||||||
|
|
||||||
Example with **kargo-cli**:
|
Example with Ansible:
|
||||||
|
|
||||||
```
|
|
||||||
kargo deploy --gce --coreos
|
|
||||||
```
|
|
||||||
|
|
||||||
Or with Ansible:
|
|
||||||
|
|
||||||
Before running the cluster playbook you must satisfy the following requirements:
|
Before running the cluster playbook you must satisfy the following requirements:
|
||||||
|
|
||||||
* On each CoreOS nodes a writable directory **/opt/bin** (~400M disk space)
|
General CoreOS Pre-Installation Notes:
|
||||||
|
- You should set the bootstrap_os variable to `coreos`
|
||||||
|
- Ensure that the bin_dir is set to `/opt/bin`
|
||||||
|
- ansible_python_interpreter should be `/opt/bin/python`. This will be laid down by the bootstrap task.
|
||||||
|
- The default resolvconf_mode setting of `docker_dns` **does not** work for CoreOS. This is because we do not edit the systemd service file for docker on CoreOS nodes. Instead, just use the `host_resolvconf` mode. It should work out of the box.
|
||||||
|
|
||||||
Then you can proceed to [cluster deployment](#run-deployment)
|
Then you can proceed to [cluster deployment](#run-deployment)
|
||||||
|
|||||||
38
docs/debian.md
Normal file
38
docs/debian.md
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
Debian Jessie
|
||||||
|
===============
|
||||||
|
|
||||||
|
Debian Jessie installation Notes:
|
||||||
|
|
||||||
|
- Add
|
||||||
|
|
||||||
|
```GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1"```
|
||||||
|
|
||||||
|
to /etc/default/grub. Then update with
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo update-grub
|
||||||
|
sudo update-grub2
|
||||||
|
sudo reboot
|
||||||
|
```
|
||||||
|
|
||||||
|
- Add the [backports](https://backports.debian.org/Instructions/) which contain Systemd 2.30 and update Systemd.
|
||||||
|
|
||||||
|
```apt-get -t jessie-backports install systemd```
|
||||||
|
|
||||||
|
(Necessary because the default Systemd version (2.15) does not support the "Delegate" directive in service files)
|
||||||
|
|
||||||
|
- Add the Ansible repository and install Ansible to get a proper version
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo add-apt-repository ppa:ansible/ansible
|
||||||
|
sudo apt-get update
|
||||||
|
sudo apt-get install ansible
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
- Install Jinja2 and Python-Netaddr
|
||||||
|
|
||||||
|
```sudo apt-get install python-jinja2=2.8-1~bpo8+1 python-netaddr```
|
||||||
|
|
||||||
|
|
||||||
|
Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
|
||||||
@@ -1,7 +1,7 @@
|
|||||||
K8s DNS stack by Kargo
|
K8s DNS stack by Kubespray
|
||||||
======================
|
======================
|
||||||
|
|
||||||
For K8s cluster nodes, kargo configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
|
For K8s cluster nodes, Kubespray configures a [Kubernetes DNS](http://kubernetes.io/docs/admin/dns/)
|
||||||
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
|
[cluster add-on](http://releases.k8s.io/master/cluster/addons/README.md)
|
||||||
to serve as an authoritative DNS server for a given ``dns_domain`` and its
|
to serve as an authoritative DNS server for a given ``dns_domain`` and its
|
||||||
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
|
``svc, default.svc`` default subdomains (a total of ``ndots: 5`` max levels).
|
||||||
@@ -44,13 +44,13 @@ DNS servers to be added *after* the cluster DNS. Used by all ``resolvconf_mode``
|
|||||||
DNS servers in early cluster deployment when no cluster DNS is available yet. These are also added as upstream
|
DNS servers in early cluster deployment when no cluster DNS is available yet. These are also added as upstream
|
||||||
DNS servers used by ``dnsmasq`` (when deployed with ``dns_mode: dnsmasq_kubedns``).
|
DNS servers used by ``dnsmasq`` (when deployed with ``dns_mode: dnsmasq_kubedns``).
|
||||||
|
|
||||||
DNS modes supported by kargo
|
DNS modes supported by Kubespray
|
||||||
============================
|
============================
|
||||||
|
|
||||||
You can modify how kargo sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
|
You can modify how Kubespray sets up DNS for your cluster with the variables ``dns_mode`` and ``resolvconf_mode``.
|
||||||
|
|
||||||
## dns_mode
|
## dns_mode
|
||||||
``dns_mode`` configures how kargo will setup cluster DNS. There are three modes available:
|
``dns_mode`` configures how Kubespray will setup cluster DNS. There are four modes available:
|
||||||
|
|
||||||
#### dnsmasq_kubedns (default)
|
#### dnsmasq_kubedns (default)
|
||||||
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
|
This installs an additional dnsmasq DaemonSet which gives more flexibility and lifts some
|
||||||
@@ -62,12 +62,26 @@ other queries are forwarded to the nameservers found in ``upstream_dns_servers``
|
|||||||
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
|
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use kubedns/skydns for
|
||||||
all queries.
|
all queries.
|
||||||
|
|
||||||
|
#### coredns
|
||||||
|
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for
|
||||||
|
all queries.
|
||||||
|
|
||||||
|
#### coredns_dual
|
||||||
|
This does not install the dnsmasq DaemonSet and instructs kubelet to directly use CoreDNS for
|
||||||
|
all queries. It will also deploy a secondary CoreDNS stack
|
||||||
|
|
||||||
|
#### manual
|
||||||
|
This does not install dnsmasq or kubedns, but allows you to specify
|
||||||
|
`manual_dns_server`, which will be configured on nodes for handling Pod DNS.
|
||||||
|
Use this method if you plan to install your own DNS server in the cluster after
|
||||||
|
initial deployment.
|
||||||
|
|
||||||
#### none
|
#### none
|
||||||
This does not install any of dnsmasq and kubedns/skydns. This basically disables cluster DNS completely and
|
This does not install either dnsmasq or kubedns/skydns. This basically disables cluster DNS completely and
|
||||||
leaves you with a non functional cluster.
|
leaves you with a non functional cluster.
|
||||||
|
|
||||||
## resolvconf_mode
|
## resolvconf_mode
|
||||||
``resolvconf_mode`` configures how kargo will setup DNS for ``hostNetwork: true`` PODs and non-k8s containers.
|
``resolvconf_mode`` configures how Kubespray will setup DNS for ``hostNetwork: true`` PODs and non-k8s containers.
|
||||||
There are three modes available:
|
There are three modes available:
|
||||||
|
|
||||||
#### docker_dns (default)
|
#### docker_dns (default)
|
||||||
@@ -100,7 +114,7 @@ used as a backup nameserver. After cluster DNS is running, all queries will be a
|
|||||||
servers, which in turn will forward queries to the system nameserver if required.
|
servers, which in turn will forward queries to the system nameserver if required.
|
||||||
|
|
||||||
#### host_resolvconf
|
#### host_resolvconf
|
||||||
This activates the classic kargo behaviour that modifies the hosts ``/etc/resolv.conf`` file and dhclient
|
This activates the classic Kubespray behaviour that modifies the hosts ``/etc/resolv.conf`` file and dhclient
|
||||||
configuration to point to the cluster dns server (either dnsmasq or kubedns, depending on dns_mode).
|
configuration to point to the cluster dns server (either dnsmasq or kubedns, depending on dns_mode).
|
||||||
|
|
||||||
As cluster DNS is not available on early deployment stage, this mode is split into 2 stages. In the first
|
As cluster DNS is not available on early deployment stage, this mode is split into 2 stages. In the first
|
||||||
@@ -120,7 +134,7 @@ cluster service names.
|
|||||||
Limitations
|
Limitations
|
||||||
-----------
|
-----------
|
||||||
|
|
||||||
* Kargo has yet ways to configure Kubedns addon to forward requests SkyDns can
|
* Kubespray does not yet have a way to configure the Kubedns addon to forward requests SkyDns can
|
||||||
not answer with authority to arbitrary recursive resolvers. This task is left
|
not answer with authority to arbitrary recursive resolvers. This task is left
|
||||||
for future. See [official SkyDns docs](https://github.com/skynetservices/skydns)
|
for future. See [official SkyDns docs](https://github.com/skynetservices/skydns)
|
||||||
for details.
|
for details.
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
Downloading binaries and containers
|
Downloading binaries and containers
|
||||||
===================================
|
===================================
|
||||||
|
|
||||||
Kargo supports several download/upload modes. The default is:
|
Kubespray supports several download/upload modes. The default is:
|
||||||
|
|
||||||
* Each node downloads binaries and container images on its own, which is
|
* Each node downloads binaries and container images on its own, which is
|
||||||
``download_run_once: False``.
|
``download_run_once: False``.
|
||||||
|
|||||||
|
Before Width: | Height: | Size: 40 KiB After Width: | Height: | Size: 40 KiB |
@@ -23,13 +23,6 @@ ip a show dev flannel.1
|
|||||||
valid_lft forever preferred_lft forever
|
valid_lft forever preferred_lft forever
|
||||||
```
|
```
|
||||||
|
|
||||||
* Docker must be configured with a bridge ip in the flannel subnet.
|
|
||||||
|
|
||||||
```
|
|
||||||
ps aux | grep docker
|
|
||||||
root 20196 1.7 2.7 1260616 56840 ? Ssl 10:18 0:07 /usr/bin/docker daemon --bip=10.233.16.1/24 --mtu=1450
|
|
||||||
```
|
|
||||||
|
|
||||||
* Try to run a container and check its ip address
|
* Try to run a container and check its ip address
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -1,45 +1,26 @@
|
|||||||
Getting started
|
Getting started
|
||||||
===============
|
===============
|
||||||
|
|
||||||
The easiest way to run the deployement is to use the **kargo-cli** tool.
|
|
||||||
A complete documentation can be found in its [github repository](https://github.com/kubespray/kargo-cli).
|
|
||||||
|
|
||||||
Here is a simple example on AWS:
|
|
||||||
|
|
||||||
* Create instances and generate the inventory
|
|
||||||
|
|
||||||
```
|
|
||||||
kargo aws --instances 3
|
|
||||||
```
|
|
||||||
|
|
||||||
* Run the deployment
|
|
||||||
|
|
||||||
```
|
|
||||||
kargo deploy --aws -u centos -n calico
|
|
||||||
```
|
|
||||||
|
|
||||||
Building your own inventory
|
Building your own inventory
|
||||||
---------------------------
|
---------------------------
|
||||||
|
|
||||||
Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
|
Ansible inventory can be stored in 3 formats: YAML, JSON, or INI-like. There is
|
||||||
an example inventory located
|
an example inventory located
|
||||||
[here](https://github.com/kubernetes-incubator/kargo/blob/master/inventory/inventory.example).
|
[here](https://github.com/kubernetes-incubator/kubespray/blob/master/inventory/sample/hosts.ini).
|
||||||
|
|
||||||
You can use an
|
You can use an
|
||||||
[inventory generator](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py)
|
[inventory generator](https://github.com/kubernetes-incubator/kubespray/blob/master/contrib/inventory_builder/inventory.py)
|
||||||
to create or modify an Ansible inventory. Currently, it is limited in
|
to create or modify an Ansible inventory. Currently, it is limited in
|
||||||
functionality and is only use for making a basic Kargo cluster, but it does
|
functionality and is only used for configuring a basic Kubespray cluster inventory, but it does
|
||||||
support creating large clusters. It now supports
|
support creating inventory file for large clusters as well. It now supports
|
||||||
separated ETCD and Kubernetes master roles from node role if the size exceeds a
|
separated ETCD and Kubernetes master roles from node role if the size exceeds a
|
||||||
certain threshold. Run inventory.py help for more information.
|
certain threshold. Run `python3 contrib/inventory_builder/inventory.py help` for more information.
|
||||||
|
|
||||||
Example inventory generator usage:
|
Example inventory generator usage:
|
||||||
|
|
||||||
```
|
cp -r inventory/sample inventory/mycluster
|
||||||
cp -r inventory my_inventory
|
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
|
||||||
declare -a IPS=(10.10.1.3 10.10.1.4 10.10.1.5)
|
CONFIG_FILE=inventory/mycluster/hosts.ini python3 contrib/inventory_builder/inventory.py ${IPS[@]}
|
||||||
CONFIG_FILE=my_inventory/inventory.cfg python3 contrib/inventory_builder/inventory.py ${IPS}
|
|
||||||
```
|
|
||||||
|
|
||||||
Starting custom deployment
|
Starting custom deployment
|
||||||
--------------------------
|
--------------------------
|
||||||
@@ -47,11 +28,90 @@ Starting custom deployment
|
|||||||
Once you have an inventory, you may want to customize deployment data vars
|
Once you have an inventory, you may want to customize deployment data vars
|
||||||
and start the deployment:
|
and start the deployment:
|
||||||
|
|
||||||
**IMPORTANT: Edit my_inventory/groups_vars/*.yaml to override data vars**
|
**IMPORTANT**: Edit my\_inventory/groups\_vars/\*.yaml to override data vars:
|
||||||
|
|
||||||
|
ansible-playbook -i inventory/mycluster/hosts.ini cluster.yml -b -v \
|
||||||
|
--private-key=~/.ssh/private_key
|
||||||
|
|
||||||
|
See more details in the [ansible guide](ansible.md).
|
||||||
|
|
||||||
|
Adding nodes
|
||||||
|
------------
|
||||||
|
|
||||||
|
You may want to add **worker** nodes to your existing cluster. This can be done by re-running the `cluster.yml` playbook, or you can target the bare minimum needed to get kubelet installed on the worker and talking to your masters. This is especially helpful when doing something like autoscaling your clusters.
|
||||||
|
|
||||||
|
- Add the new worker node to your inventory under kube-node (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
|
||||||
|
- Run the ansible-playbook command, substituting `scale.yml` for `cluster.yml`:
|
||||||
|
|
||||||
|
ansible-playbook -i inventory/mycluster/hosts.ini scale.yml -b -v \
|
||||||
|
--private-key=~/.ssh/private_key
|
||||||
|
|
||||||
|
Remove nodes
|
||||||
|
------------
|
||||||
|
|
||||||
|
You may want to remove **worker** nodes from your existing cluster. This can be done by re-running the `remove-node.yml` playbook. First, all nodes will be drained, then some Kubernetes services are stopped and some certificates deleted, and finally the kubectl command is executed to delete these nodes. This can be combined with the add node function; this is generally helpful when doing something like autoscaling your clusters. Of course, if a node is not working, you can remove the node and install it again.
|
||||||
|
|
||||||
|
- Add worker nodes to the list under kube-node if you want to delete them (or utilize a [dynamic inventory](https://docs.ansible.com/ansible/intro_dynamic_inventory.html)).
|
||||||
|
- Run the ansible-playbook command, substituting `remove-node.yml`:
|
||||||
```
|
```
|
||||||
ansible-playbook -i my_inventory/inventory.cfg cluster.yml -b -v \
|
ansible-playbook -i inventory/mycluster/hosts.ini remove-node.yml -b -v \
|
||||||
--private-key=~/.ssh/private_key
|
--private-key=~/.ssh/private_key
|
||||||
```
|
```
|
||||||
|
|
||||||
See more details in the [ansible guide](ansible.md).
|
Connecting to Kubernetes
|
||||||
|
------------------------
|
||||||
|
|
||||||
|
By default, Kubespray configures kube-master hosts with insecure access to
|
||||||
|
kube-apiserver via port 8080. A kubeconfig file is not necessary in this case,
|
||||||
|
because kubectl will use <http://localhost:8080> to connect. The kubeconfig files
|
||||||
|
generated will point to localhost (on kube-masters) and kube-node hosts will
|
||||||
|
connect either to a localhost nginx proxy or to a loadbalancer if configured.
|
||||||
|
More details on this process are in the [HA guide](ha-mode.md).
|
||||||
|
|
||||||
|
Kubespray permits connecting to the cluster remotely on any IP of any
|
||||||
|
kube-master host on port 6443 by default. However, this requires
|
||||||
|
authentication. One could generate a kubeconfig based on one installed
|
||||||
|
kube-master hosts (needs improvement) or connect with a username and password.
|
||||||
|
By default, a user with admin rights is created, named `kube`.
|
||||||
|
The password can be viewed after deployment by looking at the file
|
||||||
|
`PATH_TO_KUBESPRAY/credentials/kube_user.creds`. This contains a randomly generated
|
||||||
|
password. If you wish to set your own password, just precreate/modify this
|
||||||
|
file yourself.
|
||||||
|
|
||||||
|
For more information on kubeconfig and accessing a Kubernetes cluster, refer to
|
||||||
|
the Kubernetes [documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/).
|
||||||
|
|
||||||
|
Accessing Kubernetes Dashboard
|
||||||
|
------------------------------
|
||||||
|
|
||||||
|
As of kubernetes-dashboard v1.7.x:
|
||||||
|
|
||||||
|
- New login options that use apiserver auth proxying of token/basic/kubeconfig by default
|
||||||
|
- Requires RBAC in authorization\_modes
|
||||||
|
- Only serves over https
|
||||||
|
- No longer available at <https://first_master:6443/ui> until apiserver is updated with the https proxy URL
|
||||||
|
|
||||||
|
If the variable `dashboard_enabled` is set (default is true), then you can access the Kubernetes Dashboard at the following URL, You will be prompted for credentials:
|
||||||
|
<https://first_master:6443/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login>
|
||||||
|
|
||||||
|
Or you can run 'kubectl proxy' from your local machine to access dashboard in your browser from:
|
||||||
|
<http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/#!/login>
|
||||||
|
|
||||||
|
It is recommended to access dashboard from behind a gateway (like Ingress Controller) that enforces an authentication token. Details and other access options here: <https://github.com/kubernetes/dashboard/wiki/Accessing-Dashboard---1.7.X-and-above>
|
||||||
|
|
||||||
|
Accessing Kubernetes API
|
||||||
|
------------------------
|
||||||
|
|
||||||
|
The main client of Kubernetes is `kubectl`. It is installed on each kube-master
|
||||||
|
host and can optionally be configured on your ansible host by setting
|
||||||
|
`kubectl_localhost: true` and `kubeconfig_localhost: true` in the configuration:
|
||||||
|
|
||||||
|
- If `kubectl_localhost` enabled, `kubectl` will download onto `/usr/local/bin/` and setup with bash completion. A helper script `inventory/mycluster/artifacts/kubectl.sh` also created for setup with below `admin.conf`.
|
||||||
|
- If `kubeconfig_localhost` enabled `admin.conf` will appear in the `inventory/mycluster/artifacts/` directory after deployment.
|
||||||
|
|
||||||
|
You can see a list of nodes by running the following commands:
|
||||||
|
|
||||||
|
cd inventory/mycluster/artifacts
|
||||||
|
./kubectl.sh get nodes
|
||||||
|
|
||||||
|
If desired, copy admin.conf to ~/.kube/config.
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ Etcd
|
|||||||
----
|
----
|
||||||
|
|
||||||
The `etcd_access_endpoint` fact provides an access pattern for clients. And the
|
The `etcd_access_endpoint` fact provides an access pattern for clients. And the
|
||||||
`etcd_multiaccess` (defaults to `True`) group var controlls that behavior.
|
`etcd_multiaccess` (defaults to `True`) group var controls that behavior.
|
||||||
It makes deployed components to access the etcd cluster members
|
It makes deployed components to access the etcd cluster members
|
||||||
directly: `http://ip1:2379, http://ip2:2379,...`. This mode assumes the clients
|
directly: `http://ip1:2379, http://ip2:2379,...`. This mode assumes the clients
|
||||||
do a loadbalancing and handle HA for connections.
|
do a loadbalancing and handle HA for connections.
|
||||||
@@ -22,24 +22,26 @@ Kube-apiserver
|
|||||||
--------------
|
--------------
|
||||||
|
|
||||||
K8s components require a loadbalancer to access the apiservers via a reverse
|
K8s components require a loadbalancer to access the apiservers via a reverse
|
||||||
proxy. Kargo includes support for an nginx-based proxy that resides on each
|
proxy. Kubespray includes support for an nginx-based proxy that resides on each
|
||||||
non-master Kubernetes node. This is referred to as localhost loadbalancing. It
|
non-master Kubernetes node. This is referred to as localhost loadbalancing. It
|
||||||
is less efficient than a dedicated load balancer because it creates extra
|
is less efficient than a dedicated load balancer because it creates extra
|
||||||
health checks on the Kubernetes apiserver, but is more practical for scenarios
|
health checks on the Kubernetes apiserver, but is more practical for scenarios
|
||||||
where an external LB or virtual IP management is inconvenient. This option is
|
where an external LB or virtual IP management is inconvenient. This option is
|
||||||
configured by the variable `loadbalancer_apiserver_localhost` (defaults to `True`).
|
configured by the variable `loadbalancer_apiserver_localhost` (defaults to
|
||||||
You may also define the port the local internal loadbalancer users by changing,
|
`True`. Or `False`, if there is an external `loadbalancer_apiserver` defined).
|
||||||
`nginx_kube_apiserver_port`. This defaults to the value of `kube_apiserver_port`.
|
You may also define the port the local internal loadbalancer uses by changing,
|
||||||
It is also import to note that Kargo will only configure kubelet and kube-proxy
|
`nginx_kube_apiserver_port`. This defaults to the value of
|
||||||
on non-master nodes to use the local internal loadbalancer.
|
`kube_apiserver_port`. It is also important to note that Kubespray will only
|
||||||
|
configure kubelet and kube-proxy on non-master nodes to use the local internal
|
||||||
|
loadbalancer.
|
||||||
|
|
||||||
If you choose to NOT use the local internal loadbalancer, you will need to configure
|
If you choose to NOT use the local internal loadbalancer, you will need to
|
||||||
your own loadbalancer to achieve HA. Note that deploying a loadbalancer is up to
|
configure your own loadbalancer to achieve HA. Note that deploying a
|
||||||
a user and is not covered by ansible roles in Kargo. By default, it only configures
|
loadbalancer is up to a user and is not covered by ansible roles in Kubespray.
|
||||||
a non-HA endpoint, which points to the `access_ip` or IP address of the first server
|
By default, it only configures a non-HA endpoint, which points to the
|
||||||
node in the `kube-master` group. It can also configure clients to use endpoints
|
`access_ip` or IP address of the first server node in the `kube-master` group.
|
||||||
for a given loadbalancer type. The following diagram shows how traffic to the
|
It can also configure clients to use endpoints for a given loadbalancer type.
|
||||||
apiserver is directed.
|
The following diagram shows how traffic to the apiserver is directed.
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
@@ -66,40 +68,72 @@ listen kubernetes-apiserver-https
|
|||||||
balance roundrobin
|
balance roundrobin
|
||||||
```
|
```
|
||||||
|
|
||||||
And the corresponding example global vars config:
|
Note: That's an example config managed elsewhere outside of Kubespray.
|
||||||
|
|
||||||
|
And the corresponding example global vars for such a "cluster-aware"
|
||||||
|
external LB with the cluster API access modes configured in Kubespray:
|
||||||
```
|
```
|
||||||
apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"
|
apiserver_loadbalancer_domain_name: "my-apiserver-lb.example.com"
|
||||||
loadbalancer_apiserver:
|
loadbalancer_apiserver:
|
||||||
address: <VIP>
|
address: <VIP>
|
||||||
port: 8383
|
port: 8383
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Note: The default kubernetes apiserver configuration binds to all interfaces,
|
||||||
|
so you will need to use a different port for the vip from that the API is
|
||||||
|
listening on, or set the `kube_apiserver_bind_address` so that the API only
|
||||||
|
listens on a specific interface (to avoid conflict with haproxy binding the
|
||||||
|
port on the VIP address)
|
||||||
|
|
||||||
This domain name, or default "lb-apiserver.kubernetes.local", will be inserted
|
This domain name, or default "lb-apiserver.kubernetes.local", will be inserted
|
||||||
into the `/etc/hosts` file of all servers in the `k8s-cluster` group. Note that
|
into the `/etc/hosts` file of all servers in the `k8s-cluster` group and wired
|
||||||
|
into the generated self-signed TLS/SSL certificates as well. Note that
|
||||||
the HAProxy service should as well be HA and requires a VIP management, which
|
the HAProxy service should as well be HA and requires a VIP management, which
|
||||||
is out of scope of this doc. Specifying an external LB overrides any internal
|
is out of scope of this doc.
|
||||||
localhost LB configuration.
|
|
||||||
|
|
||||||
Note: In order to achieve HA for HAProxy instances, those must be running on
|
There is a special case for an internal and an externally configured (not with
|
||||||
the each node in the `k8s-cluster` group as well, but require no VIP, thus
|
Kubespray) LB used simultaneously. Keep in mind that the cluster is not aware
|
||||||
no VIP management.
|
of such an external LB and you need not specify any configuration variables
|
||||||
|
for it.
|
||||||
|
|
||||||
Access endpoints are evaluated automagically, as the following:
|
Note: TLS/SSL termination for externally accessed API endpoints will **not**
|
||||||
|
be covered by Kubespray for that case. Make sure your external LB provides it.
|
||||||
|
Alternatively you may specify an externally load balanced VIPs in the
|
||||||
|
`supplementary_addresses_in_ssl_keys` list. Then, kubespray will add them into
|
||||||
|
the generated cluster certificates as well.
|
||||||
|
|
||||||
| Endpoint type | kube-master | non-master |
|
Aside from that specific case, the `loadbalancer_apiserver` is considered mutually
|
||||||
|------------------------------|---------------|---------------------|
|
exclusive to `loadbalancer_apiserver_localhost`.
|
||||||
| Local LB (default) | http://lc:p | https://lc:nsp |
|
|
||||||
| External LB, no internal | https://lb:lp | https://lb:lp |
|
Access API endpoints are evaluated automagically, as the following:
|
||||||
| No ext/int LB | http://lc:p | https://m[0].aip:sp |
|
|
||||||
|
| Endpoint type | kube-master | non-master | external |
|
||||||
|
|------------------------------|----------------|---------------------|---------------------|
|
||||||
|
| Local LB (default) | https://bip:sp | https://lc:nsp | https://m[0].aip:sp |
|
||||||
|
| Local LB + Unmanaged here LB | https://bip:sp | https://lc:nsp | https://ext |
|
||||||
|
| External LB, no internal | https://bip:sp | https://lb:lp | https://lb:lp |
|
||||||
|
| No ext/int LB | https://bip:sp | https://m[0].aip:sp | https://m[0].aip:sp |
|
||||||
|
|
||||||
Where:
|
Where:
|
||||||
* `m[0]` - the first node in the `kube-master` group;
|
* `m[0]` - the first node in the `kube-master` group;
|
||||||
* `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
|
* `lb` - LB FQDN, `apiserver_loadbalancer_domain_name`;
|
||||||
|
* `ext` - Externally load balanced VIP:port and FQDN, not managed by Kubespray;
|
||||||
* `lc` - localhost;
|
* `lc` - localhost;
|
||||||
* `p` - insecure port, `kube_apiserver_insecure_port`
|
* `bip` - a custom bind IP or localhost for the default bind IP '0.0.0.0';
|
||||||
* `nsp` - nginx secure port, `nginx_kube_apiserver_port`;
|
* `nsp` - nginx secure port, `nginx_kube_apiserver_port`, defers to `sp`;
|
||||||
* `sp` - secure port, `kube_apiserver_port`;
|
* `sp` - secure port, `kube_apiserver_port`;
|
||||||
* `lp` - LB port, `loadbalancer_apiserver.port`, defers to the secure port;
|
* `lp` - LB port, `loadbalancer_apiserver.port`, defers to the secure port;
|
||||||
* `ip` - the node IP, defers to the ansible IP;
|
* `ip` - the node IP, defers to the ansible IP;
|
||||||
* `aip` - `access_ip`, defers to the ip.
|
* `aip` - `access_ip`, defers to the ip.
|
||||||
|
|
||||||
|
A second and a third column represent internal cluster access modes. The last
|
||||||
|
column illustrates an example URI to access the cluster APIs externally.
|
||||||
|
Kubespray has nothing to do with it, this is informational only.
|
||||||
|
|
||||||
|
As you can see, the masters' internal API endpoints are always
|
||||||
|
contacted via the local bind IP, which is `https://bip:sp`.
|
||||||
|
|
||||||
|
**Note** that for some cases, like healthchecks of applications deployed by
|
||||||
|
Kubespray, the masters' APIs are accessed via the insecure endpoint, which
|
||||||
|
consists of the local `kube_apiserver_insecure_bind_address` and
|
||||||
|
`kube_apiserver_insecure_port`.
|
||||||
|
|||||||
121
docs/integration.md
Normal file
121
docs/integration.md
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
# Kubespray (kargo) in own ansible playbooks repo
|
||||||
|
|
||||||
|
1. Fork [kubespray repo](https://github.com/kubernetes-incubator/kubespray) to your personal/organisation account on github.
|
||||||
|
Note:
|
||||||
|
* All forked public repos at github will be also public, so **never commit sensitive data to your public forks**.
|
||||||
|
* List of all forked repos could be retrieved from github page of original project.
|
||||||
|
|
||||||
|
2. Add **forked repo** as submodule to desired folder in your existing ansible repo (for example 3d/kubespray):
|
||||||
|
```git submodule add https://github.com/YOUR_GITHUB/kubespray.git kubespray```
|
||||||
|
Git will create _.gitmodules_ file in your existent ansible repo:
|
||||||
|
```
|
||||||
|
[submodule "3d/kubespray"]
|
||||||
|
path = 3d/kubespray
|
||||||
|
url = https://github.com/YOUR_GITHUB/kubespray.git
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Configure git to show submodule status:
|
||||||
|
```git config --global status.submoduleSummary true```
|
||||||
|
|
||||||
|
4. Add *original* kubespray repo as upstream:
|
||||||
|
```git remote add upstream https://github.com/kubernetes-incubator/kubespray.git```
|
||||||
|
|
||||||
|
5. Sync your master branch with upstream:
|
||||||
|
```
|
||||||
|
git checkout master
|
||||||
|
git fetch upstream
|
||||||
|
git merge upstream/master
|
||||||
|
git push origin master
|
||||||
|
```
|
||||||
|
|
||||||
|
6. Create a new branch which you will use in your working environment:
|
||||||
|
```git checkout -b work```
|
||||||
|
***Never*** use master branch of your repository for your commits.
|
||||||
|
|
||||||
|
7. Modify path to library and roles in your ansible.cfg file (role naming should be uniq, you may have to rename your existent roles if they have same names as kubespray project):
|
||||||
|
```
|
||||||
|
...
|
||||||
|
library = 3d/kubespray/library/
|
||||||
|
roles_path = 3d/kubespray/roles/
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
8. Copy and modify configs from kubespray `group_vars` folder to corresponding `group_vars` folder in your existent project.
|
||||||
|
You could rename *all.yml* config to something else, i.e. *kubespray.yml* and create corresponding group in your inventory file, which will include all hosts groups related to kubernetes setup.
|
||||||
|
|
||||||
|
9. Modify your ansible inventory file by adding mapping of your existent groups (if any) to kubespray naming.
|
||||||
|
For example:
|
||||||
|
```
|
||||||
|
...
|
||||||
|
#Kargo groups:
|
||||||
|
[kube-node:children]
|
||||||
|
kubenode
|
||||||
|
|
||||||
|
[k8s-cluster:children]
|
||||||
|
kubernetes
|
||||||
|
|
||||||
|
[etcd:children]
|
||||||
|
kubemaster
|
||||||
|
kubemaster-ha
|
||||||
|
|
||||||
|
[kube-master:children]
|
||||||
|
kubemaster
|
||||||
|
kubemaster-ha
|
||||||
|
|
||||||
|
[vault:children]
|
||||||
|
kube-master
|
||||||
|
|
||||||
|
[kubespray:children]
|
||||||
|
kubernetes
|
||||||
|
```
|
||||||
|
* Last entry here needed to apply kubespray.yml config file, renamed from all.yml of kubespray project.
|
||||||
|
|
||||||
|
10. Now you can include kargo tasks in you existent playbooks by including cluster.yml file:
|
||||||
|
```
|
||||||
|
- name: Include kargo tasks
|
||||||
|
include: 3d/kubespray/cluster.yml
|
||||||
|
```
|
||||||
|
Or you could copy separate tasks from cluster.yml into your ansible repository.
|
||||||
|
|
||||||
|
11. Commit changes to your ansible repo. Keep in mind, that submodule folder is just a link to the git commit hash of your forked repo.
|
||||||
|
When you update your "work" branch you need to commit changes to ansible repo as well.
|
||||||
|
Other members of your team should use ```git submodule sync```, ```git submodule update --init``` to get actual code from submodule.
|
||||||
|
|
||||||
|
# Contributing
|
||||||
|
If you made useful changes or fixed a bug in the existing kubespray repo, use this flow for PRs to the original kubespray repo.
|
||||||
|
|
||||||
|
0. Sign the [CNCF CLA](https://git.k8s.io/community/CLA.md).
|
||||||
|
|
||||||
|
1. Change working directory to git submodule directory (3d/kubespray).
|
||||||
|
|
||||||
|
2. Setup desired user.name and user.email for submodule.
|
||||||
|
If kubespray is only one submodule in your repo you could use something like:
|
||||||
|
```git submodule foreach --recursive 'git config user.name "First Last" && git config user.email "your-email-address@used.for.cncf"'```
|
||||||
|
|
||||||
|
3. Sync with upstream master:
|
||||||
|
```
|
||||||
|
git fetch upstream
|
||||||
|
git merge upstream/master
|
||||||
|
git push origin master
|
||||||
|
```
|
||||||
|
4. Create new branch for the specific fixes that you want to contribute:
|
||||||
|
```git checkout -b fixes-name-date-index```
|
||||||
|
Branch name should be self explaining to you, adding date and/or index will help you to track/delete your old PRs.
|
||||||
|
|
||||||
|
5. Find git hash of your commit in "work" repo and apply it to newly created "fix" repo:
|
||||||
|
```
|
||||||
|
git cherry-pick <COMMIT_HASH>
|
||||||
|
```
|
||||||
|
6. If you have several temporary-stage commits - squash them using [```git rebase -i```](http://eli.thegreenplace.net/2014/02/19/squashing-github-pull-requests-into-a-single-commit)
|
||||||
|
Also you could use interactive rebase (```git rebase -i HEAD~10```) to delete commits which you don't want to contribute into original repo.
|
||||||
|
|
||||||
|
7. When your changes are in place, you need to check the upstream repo one more time because it could have changed during your work.
|
||||||
|
Check that you're on correct branch:
|
||||||
|
```git status```
|
||||||
|
And pull changes from upstream (if any):
|
||||||
|
```git pull --rebase upstream master```
|
||||||
|
|
||||||
|
8. Now push your changes to your **fork** repo with ```git push```. If your branch doesn't exists on github, git will propose you to use something like ```git push --set-upstream origin fixes-name-date-index```.
|
||||||
|
|
||||||
|
9. Open your forked repo in the browser; on the main page you will see a suggestion to create a pull request for your newly created branch. Check the proposed diff of your PR. If something is wrong you could safely delete the "fix" branch on github using ```git push origin --delete fixes-name-date-index```, ```git branch -D fixes-name-date-index``` and start the whole process from the beginning.
|
||||||
|
If everything is fine - add description about your changes (what they do and why they're needed) and confirm pull request creation.
|
||||||
@@ -3,8 +3,7 @@ Large deployments of K8s
|
|||||||
|
|
||||||
For a large scaled deployments, consider the following configuration changes:
|
For a large scaled deployments, consider the following configuration changes:
|
||||||
|
|
||||||
* Tune [ansible settings]
|
* Tune [ansible settings](http://docs.ansible.com/ansible/intro_configuration.html)
|
||||||
(http://docs.ansible.com/ansible/intro_configuration.html)
|
|
||||||
for `forks` and `timeout` vars to fit large numbers of nodes being deployed.
|
for `forks` and `timeout` vars to fit large numbers of nodes being deployed.
|
||||||
|
|
||||||
* Override containers' `foo_image_repo` vars to point to intranet registry.
|
* Override containers' `foo_image_repo` vars to point to intranet registry.
|
||||||
@@ -34,6 +33,9 @@ For a large scaled deployments, consider the following configuration changes:
|
|||||||
``kube_controller_pod_eviction_timeout`` for better Kubernetes reliability.
|
``kube_controller_pod_eviction_timeout`` for better Kubernetes reliability.
|
||||||
Check out [Kubernetes Reliability](kubernetes-reliability.md)
|
Check out [Kubernetes Reliability](kubernetes-reliability.md)
|
||||||
|
|
||||||
|
* Tune network prefix sizes. Those are ``kube_network_node_prefix``,
|
||||||
|
``kube_service_addresses`` and ``kube_pods_subnet``.
|
||||||
|
|
||||||
* Add calico-rr nodes if you are deploying with Calico or Canal. Nodes recover
|
* Add calico-rr nodes if you are deploying with Calico or Canal. Nodes recover
|
||||||
from host/network interruption much quicker with calico-rr. Note that
|
from host/network interruption much quicker with calico-rr. Note that
|
||||||
calico-rr role must be on a host without kube-master or kube-node role (but
|
calico-rr role must be on a host without kube-master or kube-node role (but
|
||||||
@@ -44,5 +46,8 @@ For a large scaled deployments, consider the following configuration changes:
|
|||||||
section of the Getting started guide for tips on creating a large scale
|
section of the Getting started guide for tips on creating a large scale
|
||||||
Ansible inventory.
|
Ansible inventory.
|
||||||
|
|
||||||
|
* Override the ``etcd_events_cluster_setup: true`` store events in a separate
|
||||||
|
dedicated etcd instance.
|
||||||
|
|
||||||
For example, when deploying 200 nodes, you may want to run ansible with
|
For example, when deploying 200 nodes, you may want to run ansible with
|
||||||
``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.
|
``--forks=50``, ``--timeout=600`` and define the ``retry_stagger: 60``.
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
Network Checker Application
|
Network Checker Application
|
||||||
===========================
|
===========================
|
||||||
|
|
||||||
With the ``deploy_netchecker`` var enabled (defaults to false), Kargo deploys a
|
With the ``deploy_netchecker`` var enabled (defaults to false), Kubespray deploys a
|
||||||
Network Checker Application from the 3rd side `l23network/k8s-netchecker` docker
|
Network Checker Application from the 3rd side `l23network/k8s-netchecker` docker
|
||||||
images. It consists of the server and agents trying to reach the server by usual
|
images. It consists of the server and agents trying to reach the server by usual
|
||||||
for Kubernetes applications network connectivity meanings. Therefore, this
|
for Kubernetes applications network connectivity meanings. Therefore, this
|
||||||
@@ -17,7 +17,7 @@ any of the cluster nodes:
|
|||||||
```
|
```
|
||||||
curl http://localhost:31081/api/v1/connectivity_check
|
curl http://localhost:31081/api/v1/connectivity_check
|
||||||
```
|
```
|
||||||
Note that Kargo does not invoke the check but only deploys the application, if
|
Note that Kubespray does not invoke the check but only deploys the application, if
|
||||||
requested.
|
requested.
|
||||||
|
|
||||||
There are related application-specific variables:
|
There are related application-specific variables:
|
||||||
|
|||||||
@@ -35,14 +35,12 @@ Then you can use the instance ids to find the connected [neutron](https://wiki.o
|
|||||||
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
|
| 5662a4e0-e646-47f0-bf88-d80fbd2d99ef | e1f48aad-df96-4bce-bf61-62ae12bf3f95 |
|
||||||
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
|
| e5ae2045-a1e1-4e99-9aac-4353889449a7 | 725cd548-6ea3-426b-baaa-e7306d3c8052 |
|
||||||
|
|
||||||
Given the port ids on the left, you can set the `allowed_address_pairs` in neutron:
|
Given the port ids on the left, you can set the `allowed_address_pairs` in neutron.
|
||||||
|
Note that you have to allow both of `kube_service_addresses` (default `10.233.0.0/18`)
|
||||||
|
and `kube_pods_subnet` (default `10.233.64.0/18`.)
|
||||||
|
|
||||||
# allow kube_service_addresses network
|
# allow kube_service_addresses and kube_pods_subnet network
|
||||||
neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18
|
neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
|
||||||
neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18
|
neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.0.0/18 ip_address=10.233.64.0/18
|
||||||
|
|
||||||
# allow kube_pods_subnet network
|
|
||||||
neutron port-update 5662a4e0-e646-47f0-bf88-d80fbd2d99ef --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18
|
|
||||||
neutron port-update e5ae2045-a1e1-4e99-9aac-4353889449a7 --allowed_address_pairs list=true type=dict ip_address=10.233.64.0/18
|
|
||||||
|
|
||||||
Now you can finally run the playbook.
|
Now you can finally run the playbook.
|
||||||
|
|||||||
19
docs/opensuse.md
Normal file
19
docs/opensuse.md
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
openSUSE Leap 42.3 and Tumbleweed
|
||||||
|
===============
|
||||||
|
|
||||||
|
openSUSE Leap installation Notes:
|
||||||
|
|
||||||
|
- Install Ansible
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo zypper ref
|
||||||
|
sudo zypper -n install ansible
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
- Install Jinja2 and Python-Netaddr
|
||||||
|
|
||||||
|
```sudo zypper -n install python-Jinja2 python-netaddr```
|
||||||
|
|
||||||
|
|
||||||
|
Now you can continue with [Preparing your deployment](getting-started.md#starting-custom-deployment)
|
||||||
@@ -1,71 +1,47 @@
|
|||||||
Kargo's roadmap
|
Kubespray's roadmap
|
||||||
=================
|
=================
|
||||||
|
|
||||||
### Kubeadm
|
### Kubeadm
|
||||||
- Propose kubeadm as an option in order to setup the kubernetes cluster.
|
- Switch to kubeadm deployment as the default method after some bugs are fixed:
|
||||||
That would probably improve deployment speed and certs management [#553](https://github.com/kubespray/kargo/issues/553)
|
* Support for basic auth
|
||||||
|
* cloudprovider cloud-config mount [#484](https://github.com/kubernetes/kubeadm/issues/484)
|
||||||
|
|
||||||
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kargo/issues/320)
|
### Self deployment (pull-mode) [#320](https://github.com/kubespray/kubespray/issues/320)
|
||||||
- the playbook would install and configure docker/rkt and the etcd cluster
|
- the playbook would install and configure docker/rkt and the etcd cluster
|
||||||
- the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
|
- the following data would be inserted into etcd: certs,tokens,users,inventory,group_vars.
|
||||||
- a "kubespray" container would be deployed (kargo-cli, ansible-playbook, kpm)
|
- a "kubespray" container would be deployed (kubespray-cli, ansible-playbook, kpm)
|
||||||
- to be discussed, a way to provide the inventory
|
- to be discussed, a way to provide the inventory
|
||||||
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kargo/issues/321)
|
- **self deployment** of the node from inside a container [#321](https://github.com/kubespray/kubespray/issues/321)
|
||||||
|
|
||||||
### Provisionning and cloud providers
|
### Provisioning and cloud providers
|
||||||
- [ ] Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure**
|
- [ ] Terraform to provision instances on **GCE, AWS, Openstack, Digital Ocean, Azure**
|
||||||
- [ ] On AWS autoscaling, multi AZ
|
- [ ] On AWS autoscaling, multi AZ
|
||||||
- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kargo/issues/297)
|
- [ ] On Azure autoscaling, create loadbalancer [#297](https://github.com/kubespray/kubespray/issues/297)
|
||||||
- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kargo/issues/280)
|
- [ ] On GCE be able to create a loadbalancer automatically (IAM ?) [#280](https://github.com/kubespray/kubespray/issues/280)
|
||||||
- [x] **TLS bootstrap** support for kubelet [#234](https://github.com/kubespray/kargo/issues/234)
|
- [x] **TLS bootstrap** support for kubelet (covered by kubeadm, but not in standard deployment) [#234](https://github.com/kubespray/kubespray/issues/234)
|
||||||
(related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
|
(related issues: https://github.com/kubernetes/kubernetes/pull/20439 <br>
|
||||||
https://github.com/kubernetes/kubernetes/issues/18112)
|
https://github.com/kubernetes/kubernetes/issues/18112)
|
||||||
|
|
||||||
### Tests
|
### Tests
|
||||||
- [x] Run kubernetes e2e tests
|
- [ ] Run kubernetes e2e tests
|
||||||
- [x] migrate to jenkins
|
- [ ] Test idempotency on on single OS but for all network plugins/container engines
|
||||||
(a test is currently a deployment on a 3 node cluste, testing k8s api, ping between 2 pods)
|
|
||||||
- [x] Full tests on GCE per day (All OS's, all network plugins)
|
|
||||||
- [x] trigger a single test per pull request
|
|
||||||
- [ ] ~~single test with the Ansible version n-1 per day~~
|
|
||||||
- [x] Test idempotency on on single OS but for all network plugins/container engines
|
|
||||||
- [ ] single test on AWS per day
|
- [ ] single test on AWS per day
|
||||||
- [x] test different achitectures :
|
|
||||||
- 3 instances, 3 are members of the etcd cluster, 2 of them acting as master and node, 1 as node
|
|
||||||
- 5 instances, 3 are etcd and nodes, 2 are masters only
|
|
||||||
- 7 instances, 3 etcd only, 2 masters, 2 nodes
|
|
||||||
- [ ] test scale up cluster: +1 etcd, +1 master, +1 node
|
- [ ] test scale up cluster: +1 etcd, +1 master, +1 node
|
||||||
|
- [ ] Reorganize CI test vars into group var files
|
||||||
|
|
||||||
### Lifecycle
|
### Lifecycle
|
||||||
- [ ] Adopt the kubeadm tool by delegating CM tasks it is capable to accomplish well [#553](https://github.com/kubespray/kargo/issues/553)
|
|
||||||
- [x] Drain worker node when upgrading k8s components in a worker node. [#154](https://github.com/kubespray/kargo/issues/154)
|
|
||||||
- [ ] Drain worker node when shutting down/deleting an instance
|
|
||||||
- [ ] Upgrade granularity: select components to upgrade and skip others
|
- [ ] Upgrade granularity: select components to upgrade and skip others
|
||||||
|
|
||||||
### Networking
|
### Networking
|
||||||
- [ ] romana.io support [#160](https://github.com/kubespray/kargo/issues/160)
|
|
||||||
- [ ] Configure network policy for Calico. [#159](https://github.com/kubespray/kargo/issues/159)
|
|
||||||
- [ ] Opencontrail
|
- [ ] Opencontrail
|
||||||
- [x] Canal
|
- [ ] Consolidate network_plugins and kubernetes-apps/network_plugins
|
||||||
- [x] Cloud Provider native networking (instead of our network plugins)
|
|
||||||
|
|
||||||
### High availability
|
### Kubespray API
|
||||||
- (to be discussed) option to set a loadbalancer for the apiservers like ucarp/packemaker/keepalived
|
|
||||||
While waiting for the issue [kubernetes/kubernetes#18174](https://github.com/kubernetes/kubernetes/issues/18174) to be fixed.
|
|
||||||
|
|
||||||
### Kargo-cli
|
|
||||||
- Delete instances
|
|
||||||
- `kargo vagrant` to setup a test cluster locally
|
|
||||||
- `kargo azure` for Microsoft Azure support
|
|
||||||
- switch to Terraform instead of Ansible for provisionning
|
|
||||||
- update $HOME/.kube/config when a cluster is deployed. Optionally switch to this context
|
|
||||||
|
|
||||||
### Kargo API
|
|
||||||
- Perform all actions through an **API**
|
- Perform all actions through an **API**
|
||||||
- Store inventories / configurations of multiple clusters
|
- Store inventories / configurations of multiple clusters
|
||||||
- make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory
|
- make sure that state of cluster is completely saved in no more than one config file beyond hosts inventory
|
||||||
|
|
||||||
### Addons (with kpm)
|
### Addons (helm or native ansible)
|
||||||
Include optional deployments to init the cluster:
|
Include optional deployments to init the cluster:
|
||||||
##### Monitoring
|
##### Monitoring
|
||||||
- Heapster / Grafana ....
|
- Heapster / Grafana ....
|
||||||
@@ -85,10 +61,10 @@ Include optionals deployments to init the cluster:
|
|||||||
- Deis Workflow
|
- Deis Workflow
|
||||||
|
|
||||||
### Others
|
### Others
|
||||||
- remove nodes (adding is already supported)
|
- remove nodes (adding is already supported)
|
||||||
- being able to choose any k8s version (almost done)
|
- Organize and update documentation (split in categories)
|
||||||
- **rkt** support [#59](https://github.com/kubespray/kargo/issues/59)
|
- Refactor downloads so it all runs in the beginning of deployment
|
||||||
- Review documentation (split in categories)
|
- Make bootstrapping OS more consistent
|
||||||
- **consul** -> if officially supported by k8s
|
- **consul** -> if officially supported by k8s
|
||||||
- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kargo/issues/312)
|
- flex volumes options (e.g. **torrus** support) [#312](https://github.com/kubespray/kubespray/issues/312)
|
||||||
- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kargo/issues/329)
|
- Clusters federation option (aka **ubernetes**) [#329](https://github.com/kubespray/kubespray/issues/329)
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
Upgrading Kubernetes in Kargo
|
Upgrading Kubernetes in Kubespray
|
||||||
=============================
|
=============================
|
||||||
|
|
||||||
#### Description
|
#### Description
|
||||||
|
|
||||||
Kargo handles upgrades the same way it handles initial deployment. That is to
|
Kubespray handles upgrades the same way it handles initial deployment. That is to
|
||||||
say that each component is laid down in a fixed order. You should be able to
|
say that each component is laid down in a fixed order. You should be able to
|
||||||
upgrade from Kargo tag 2.0 up to the current master without difficulty. You can
|
upgrade from Kubespray tag 2.0 up to the current master without difficulty. You can
|
||||||
also individually control versions of components by explicitly defining their
|
also individually control versions of components by explicitly defining their
|
||||||
versions. Here are all version vars for each component:
|
versions. Here are all version vars for each component:
|
||||||
|
|
||||||
@@ -24,18 +24,18 @@ If you wanted to upgrade just kube_version from v1.4.3 to v1.4.6, you could
|
|||||||
deploy the following way:
|
deploy the following way:
|
||||||
|
|
||||||
```
|
```
|
||||||
ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.3
|
ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.3
|
||||||
```
|
```
|
||||||
|
|
||||||
And then repeat with v1.4.6 as kube_version:
|
And then repeat with v1.4.6 as kube_version:
|
||||||
|
|
||||||
```
|
```
|
||||||
ansible-playbook cluster.yml -i inventory/inventory.cfg -e kube_version=v1.4.6
|
ansible-playbook cluster.yml -i inventory/sample/hosts.ini -e kube_version=v1.4.6
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Graceful upgrade
|
#### Graceful upgrade
|
||||||
|
|
||||||
Kargo also supports cordon, drain and uncordoning of nodes when performing
|
Kubespray also supports cordon, drain and uncordoning of nodes when performing
|
||||||
a cluster upgrade. There is a separate playbook used for this purpose. It is
|
a cluster upgrade. There is a separate playbook used for this purpose. It is
|
||||||
important to note that upgrade-cluster.yml can only be used for upgrading an
|
important to note that upgrade-cluster.yml can only be used for upgrading an
|
||||||
existing cluster. That means there must be at least 1 kube-master already
|
existing cluster. That means there must be at least 1 kube-master already
|
||||||
@@ -44,7 +44,15 @@ deployed.
|
|||||||
```
|
```
|
||||||
git fetch origin
|
git fetch origin
|
||||||
git checkout origin/master
|
git checkout origin/master
|
||||||
ansible-playbook upgrade-cluster.yml -b -i inventory/inventory.cfg
|
ansible-playbook upgrade-cluster.yml -b -i inventory/sample/hosts.ini -e kube_version=v1.6.0
|
||||||
|
```
|
||||||
|
|
||||||
|
After a successful upgrade, the Server Version should be updated:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ kubectl version
|
||||||
|
Client Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.0", GitCommit:"fff5156092b56e6bd60fff75aad4dc9de6b6ef37", GitTreeState:"clean", BuildDate:"2017-03-28T19:15:41Z", GoVersion:"go1.8", Compiler:"gc", Platform:"darwin/amd64"}
|
||||||
|
Server Version: version.Info{Major:"1", Minor:"6", GitVersion:"v1.6.0+coreos.0", GitCommit:"8031716957d697332f9234ddf85febb07ac6c3e3", GitTreeState:"clean", BuildDate:"2017-03-29T04:33:09Z", GoVersion:"go1.7.5", Compiler:"gc", Platform:"linux/amd64"}
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Upgrade order
|
#### Upgrade order
|
||||||
@@ -59,3 +67,17 @@ follows:
|
|||||||
* network_plugin (such as Calico or Weave)
|
* network_plugin (such as Calico or Weave)
|
||||||
* kube-apiserver, kube-scheduler, and kube-controller-manager
|
* kube-apiserver, kube-scheduler, and kube-controller-manager
|
||||||
* Add-ons (such as KubeDNS)
|
* Add-ons (such as KubeDNS)
|
||||||
|
|
||||||
|
#### Upgrade considerations
|
||||||
|
|
||||||
|
Kubespray supports rotating certificates used for etcd and Kubernetes
|
||||||
|
components, but some manual steps may be required. If you have a pod that
|
||||||
|
requires use of a service token and is deployed in a namespace other than
|
||||||
|
`kube-system`, you will need to manually delete the affected pods after
|
||||||
|
rotating certificates. This is because all service account tokens are dependent
|
||||||
|
on the apiserver token that is used to generate them. When the certificate
|
||||||
|
rotates, all service account tokens must be rotated as well. During the
|
||||||
|
kubernetes-apps/rotate_tokens role, only pods in kube-system are destroyed and
|
||||||
|
recreated. All other invalidated service account tokens are cleaned up
|
||||||
|
automatically, but other pods are not deleted out of an abundance of caution
|
||||||
|
for impact to user deployed pods.
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
Vagrant Install
|
Vagrant Install
|
||||||
=================
|
=================
|
||||||
|
|
||||||
Assuming you have Vagrant (1.8+) installed with virtualbox (it may work
|
Assuming you have Vagrant (2.0+) installed with virtualbox (it may work
|
||||||
with vmware, but is untested) you should be able to launch a 3 node
|
with vmware, but is untested) you should be able to launch a 3 node
|
||||||
Kubernetes cluster by simply running `$ vagrant up`.<br />
|
Kubernetes cluster by simply running `$ vagrant up`.<br />
|
||||||
|
|
||||||
@@ -39,3 +39,31 @@ k8s-01 Ready 45s
|
|||||||
k8s-02 Ready 45s
|
k8s-02 Ready 45s
|
||||||
k8s-03 Ready 45s
|
k8s-03 Ready 45s
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Customize Vagrant
|
||||||
|
=================
|
||||||
|
|
||||||
|
You can override the default settings in the `Vagrantfile` either by directly modifying the `Vagrantfile`
|
||||||
|
or through an override file.
|
||||||
|
|
||||||
|
In the same directory as the `Vagrantfile`, create a folder called `vagrant` and create `config.rb` file in it.
|
||||||
|
|
||||||
|
You're able to override the variables defined in `Vagrantfile` by providing the value in the `vagrant/config.rb` file,
|
||||||
|
e.g.:
|
||||||
|
|
||||||
|
echo '$forwarded_ports = {8001 => 8001}' >> vagrant/config.rb
|
||||||
|
|
||||||
|
and after `vagrant up` or `vagrant reload`, your host will have port forwarding setup with the guest on port 8001.
|
||||||
|
|
||||||
|
Use alternative OS for Vagrant
|
||||||
|
==============================
|
||||||
|
|
||||||
|
By default, Vagrant uses Ubuntu 16.04 box to provision a local cluster. You may use an alternative supported
|
||||||
|
operating system for your local cluster.
|
||||||
|
|
||||||
|
Customize `$os` variable in `Vagrantfile` or as override, e.g.,:
|
||||||
|
|
||||||
|
echo '$os = "coreos-stable"' >> vagrant/config.rb
|
||||||
|
|
||||||
|
|
||||||
|
The supported operating systems for vagrant are defined in the `SUPPORTED_OS` constant in the `Vagrantfile`.
|
||||||
|
|||||||
70
docs/vars.md
70
docs/vars.md
@@ -1,4 +1,4 @@
|
|||||||
Configurable Parameters in Kargo
|
Configurable Parameters in Kubespray
|
||||||
================================
|
================================
|
||||||
|
|
||||||
#### Generic Ansible variables
|
#### Generic Ansible variables
|
||||||
@@ -12,7 +12,7 @@ Some variables of note include:
|
|||||||
* *ansible_default_ipv4.address*: IP address Ansible automatically chooses.
|
* *ansible_default_ipv4.address*: IP address Ansible automatically chooses.
|
||||||
Generated based on the output from the command ``ip -4 route get 8.8.8.8``
|
Generated based on the output from the command ``ip -4 route get 8.8.8.8``
|
||||||
|
|
||||||
#### Common vars that are used in Kargo
|
#### Common vars that are used in Kubespray
|
||||||
|
|
||||||
* *calico_version* - Specify version of Calico to use
|
* *calico_version* - Specify version of Calico to use
|
||||||
* *calico_cni_version* - Specify version of Calico CNI plugin to use
|
* *calico_cni_version* - Specify version of Calico CNI plugin to use
|
||||||
@@ -28,6 +28,7 @@ Some variables of note include:
|
|||||||
* *kube_version* - Specify a given Kubernetes hyperkube version
|
* *kube_version* - Specify a given Kubernetes hyperkube version
|
||||||
* *searchdomains* - Array of DNS domains to search when looking up hostnames
|
* *searchdomains* - Array of DNS domains to search when looking up hostnames
|
||||||
* *nameservers* - Array of nameservers to use for DNS lookup
|
* *nameservers* - Array of nameservers to use for DNS lookup
|
||||||
|
* *preinstall_selinux_state* - Set selinux state, permitted values are permissive and disabled.
|
||||||
|
|
||||||
#### Addressing variables
|
#### Addressing variables
|
||||||
|
|
||||||
@@ -35,16 +36,16 @@ Some variables of note include:
|
|||||||
* *access_ip* - IP for other hosts to use to connect to. Often required when
|
* *access_ip* - IP for other hosts to use to connect to. Often required when
|
||||||
deploying from a cloud, such as OpenStack or GCE and you have separate
|
deploying from a cloud, such as OpenStack or GCE and you have separate
|
||||||
public/floating and private IPs.
|
public/floating and private IPs.
|
||||||
* *ansible_default_ipv4.address* - Not Kargo-specific, but it is used if ip
|
* *ansible_default_ipv4.address* - Not Kubespray-specific, but it is used if ip
|
||||||
and access_ip are undefined
|
and access_ip are undefined
|
||||||
* *loadbalancer_apiserver* - If defined, all hosts will connect to this
|
* *loadbalancer_apiserver* - If defined, all hosts will connect to this
|
||||||
address instead of localhost for kube-masters and kube-master[0] for
|
address instead of localhost for kube-masters and kube-master[0] for
|
||||||
kube-nodes. See more details in the
|
kube-nodes. See more details in the
|
||||||
[HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md).
|
[HA guide](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ha-mode.md).
|
||||||
* *loadbalancer_apiserver_localhost* - makes all hosts to connect to
|
* *loadbalancer_apiserver_localhost* - makes all hosts to connect to
|
||||||
the apiserver internally load balanced endpoint. Mutual exclusive to the
|
the apiserver internally load balanced endpoint. Mutual exclusive to the
|
||||||
`loadbalancer_apiserver`. See more details in the
|
`loadbalancer_apiserver`. See more details in the
|
||||||
[HA guide](https://github.com/kubernetes-incubator/kargo/blob/master/docs/ha-mode.md).
|
[HA guide](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/ha-mode.md).
|
||||||
|
|
||||||
#### Cluster variables
|
#### Cluster variables
|
||||||
|
|
||||||
@@ -61,12 +62,23 @@ following default cluster paramters:
|
|||||||
* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining
|
* *kube_network_node_prefix* - Subnet allocated per-node for pod IPs. Remaining
|
||||||
bits in kube_pods_subnet dictates how many kube-nodes can be in cluster.
|
bits in kube_pods_subnet dictates how many kube-nodes can be in cluster.
|
||||||
* *dns_setup* - Enables dnsmasq
|
* *dns_setup* - Enables dnsmasq
|
||||||
* *dns_server* - Cluster IP for dnsmasq (default is 10.233.0.2)
|
* *dnsmasq_dns_server* - Cluster IP for dnsmasq (default is 10.233.0.2)
|
||||||
* *skydns_server* - Cluster IP for KubeDNS (default is 10.233.0.3)
|
* *skydns_server* - Cluster IP for DNS (default is 10.233.0.3)
|
||||||
|
* *skydns_server_secondary* - Secondary Cluster IP for CoreDNS used with coredns_dual deployment (default is 10.233.0.4)
|
||||||
* *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
|
* *cloud_provider* - Enable extra Kubelet option if operating inside GCE or
|
||||||
OpenStack (default is unset)
|
OpenStack (default is unset)
|
||||||
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
|
* *kube_hostpath_dynamic_provisioner* - Required for use of PetSets type in
|
||||||
Kubernetes
|
Kubernetes
|
||||||
|
* *kube_feature_gates* - A list of key=value pairs that describe feature gates for
|
||||||
|
alpha/experimental Kubernetes features. (defaults is `[]`)
|
||||||
|
* *authorization_modes* - A list of [authorization mode](
|
||||||
|
https://kubernetes.io/docs/admin/authorization/#using-flags-for-your-authorization-module)
|
||||||
|
that the cluster should be configured for. Defaults to `['Node', 'RBAC']`
|
||||||
|
(Node and RBAC authorizers).
|
||||||
|
Note: `Node` and `RBAC` are enabled by default. Previously deployed clusters can be
|
||||||
|
converted to RBAC mode. However, your apps which rely on Kubernetes API will
|
||||||
|
require a service account and cluster role bindings. You can override this
|
||||||
|
setting by setting authorization_modes to `[]`.
|
||||||
|
|
||||||
Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
|
Note, if cloud providers have any use of the ``10.233.0.0/16``, like instances'
|
||||||
private addresses, make sure to pick another values for ``kube_service_addresses``
|
private addresses, make sure to pick another values for ``kube_service_addresses``
|
||||||
@@ -79,26 +91,60 @@ other settings from your existing /etc/resolv.conf are lost. Set the following
|
|||||||
variables to match your requirements.
|
variables to match your requirements.
|
||||||
|
|
||||||
* *upstream_dns_servers* - Array of upstream DNS servers configured on host in
|
* *upstream_dns_servers* - Array of upstream DNS servers configured on host in
|
||||||
addition to Kargo deployed DNS
|
addition to Kubespray deployed DNS
|
||||||
* *nameservers* - Array of DNS servers configured for use in dnsmasq
|
* *nameservers* - Array of DNS servers configured for use in dnsmasq
|
||||||
* *searchdomains* - Array of up to 4 search domains
|
* *searchdomains* - Array of up to 4 search domains
|
||||||
* *skip_dnsmasq* - Don't set up dnsmasq (use only KubeDNS)
|
* *skip_dnsmasq* - Don't set up dnsmasq (use only KubeDNS)
|
||||||
|
|
||||||
For more information, see [DNS
|
For more information, see [DNS
|
||||||
Stack](https://github.com/kubernetes-incubator/kargo/blob/master/docs/dns-stack.md).
|
Stack](https://github.com/kubernetes-incubator/kubespray/blob/master/docs/dns-stack.md).
|
||||||
|
|
||||||
#### Other service variables
|
#### Other service variables
|
||||||
|
|
||||||
* *docker_options* - Commonly used to set
|
* *docker_options* - Commonly used to set
|
||||||
``--insecure-registry=myregistry.mydomain:5000``
|
``--insecure-registry=myregistry.mydomain:5000``
|
||||||
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
|
* *http_proxy/https_proxy/no_proxy* - Proxy variables for deploying behind a
|
||||||
proxy
|
proxy. Note that no_proxy defaults to all internal cluster IPs and hostnames
|
||||||
|
that correspond to each node.
|
||||||
|
* *kubelet_deployment_type* - Controls which platform to deploy kubelet on.
|
||||||
|
Available options are ``host``, ``rkt``, and ``docker``. ``docker`` mode
|
||||||
|
is unlikely to work on newer releases. Starting with Kubernetes v1.7
|
||||||
|
series, this now defaults to ``host``. Before v1.7, the default was Docker.
|
||||||
|
This is because of cgroup [issues](https://github.com/kubernetes/kubernetes/issues/43704).
|
||||||
* *kubelet_load_modules* - For some things, kubelet needs to load kernel modules. For example,
|
* *kubelet_load_modules* - For some things, kubelet needs to load kernel modules. For example,
|
||||||
dynamic kernel services are needed for mounting persistent volumes into containers. These may not be
|
dynamic kernel services are needed for mounting persistent volumes into containers. These may not be
|
||||||
loaded by preinstall kubernetes processes. For example, ceph and rbd backed volumes. Set this variable to
|
loaded by preinstall kubernetes processes. For example, ceph and rbd backed volumes. Set this variable to
|
||||||
true to let kubelet load kernel modules.
|
true to let kubelet load kernel modules.
|
||||||
|
* *kubelet_cgroup_driver* - Allows manual override of the
|
||||||
|
cgroup-driver option for Kubelet. By default autodetection is used
|
||||||
|
to match Docker configuration.
|
||||||
|
* *node_labels* - Labels applied to nodes via kubelet --node-labels parameter.
|
||||||
|
For example, labels can be set in the inventory as variables or more widely in group_vars.
|
||||||
|
*node_labels* must be defined as a dict:
|
||||||
|
```
|
||||||
|
node_labels:
|
||||||
|
label1_name: label1_value
|
||||||
|
label2_name: label2_value
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Custom flags for Kube Components
|
||||||
|
For all kube components, custom flags can be passed in. This allows for edge cases where users need changes to the default deployment that may not be applicable to all deployments. This can be done by providing a list of flags. Example:
|
||||||
|
```
|
||||||
|
kubelet_custom_flags:
|
||||||
|
- "--eviction-hard=memory.available<100Mi"
|
||||||
|
- "--eviction-soft-grace-period=memory.available=30s"
|
||||||
|
- "--eviction-soft=memory.available<300Mi"
|
||||||
|
```
|
||||||
|
The possible vars are:
|
||||||
|
* *apiserver_custom_flags*
|
||||||
|
* *controller_mgr_custom_flags*
|
||||||
|
* *scheduler_custom_flags*
|
||||||
|
* *kubelet_custom_flags*
|
||||||
|
|
||||||
#### User accounts
|
#### User accounts
|
||||||
|
|
||||||
Kargo sets up two Kubernetes accounts by default: ``root`` and ``kube``. Their
|
By default, a user with admin rights is created, named `kube`.
|
||||||
passwords default to changeme. You can set this by changing ``kube_api_pwd``.
|
The password can be viewed after deployment by looking at the file
|
||||||
|
`PATH_TO_KUBESPRAY/credentials/kube_user.creds`. This contains a randomly generated
|
||||||
|
password. If you wish to set your own password, just precreate/modify this
|
||||||
|
file yourself or change `kube_api_pwd` var.
|
||||||
|
|||||||
@@ -24,8 +24,7 @@ hardcoded to only create a Vault role for Etcd.
|
|||||||
This step is where the long-term Vault cluster is started and configured. Its
|
This step is where the long-term Vault cluster is started and configured. Its
|
||||||
first task, is to stop any temporary instances of Vault, to free the port for
|
first task, is to stop any temporary instances of Vault, to free the port for
|
||||||
the long-term. At the end of this task, the entire Vault cluster should be up
|
the long-term. At the end of this task, the entire Vault cluster should be up
|
||||||
and read to go.
|
and ready to go.
|
||||||
|
|
||||||
|
|
||||||
Keys to the Kingdom
|
Keys to the Kingdom
|
||||||
-------------------
|
-------------------
|
||||||
@@ -39,35 +38,43 @@ vault group.
|
|||||||
It is *highly* recommended that these secrets are removed from the servers after
|
It is *highly* recommended that these secrets are removed from the servers after
|
||||||
your cluster has been deployed, and kept in a safe location of your choosing.
|
your cluster has been deployed, and kept in a safe location of your choosing.
|
||||||
Naturally, the seriousness of the situation depends on what you're doing with
|
Naturally, the seriousness of the situation depends on what you're doing with
|
||||||
your Kargo cluster, but with these secrets, an attacker will have the ability
|
your Kubespray cluster, but with these secrets, an attacker will have the ability
|
||||||
to authenticate to almost everything in Kubernetes and decode all private
|
to authenticate to almost everything in Kubernetes and decode all private
|
||||||
(HTTPS) traffic on your network signed by Vault certificates.
|
(HTTPS) traffic on your network signed by Vault certificates.
|
||||||
|
|
||||||
For even greater security, you may want to remove and store elsewhere any
|
For even greater security, you may want to remove and store elsewhere any
|
||||||
CA keys generated as well (e.g. /etc/vault/ssl/ca-key.pem).
|
CA keys generated as well (e.g. /etc/vault/ssl/ca-key.pem).
|
||||||
|
|
||||||
Vault by default encrypts all traffic to and from the datastore backend, all
|
Vault by default encrypts all traffic to and from the datastore backend, all
|
||||||
resting data, and uses TLS for its TCP listener. It is recommended that you
|
resting data, and uses TLS for its TCP listener. It is recommended that you
|
||||||
do not change the Vault config to disable TLS, unless you absolutely have to.
|
do not change the Vault config to disable TLS, unless you absolutely have to.
|
||||||
|
|
||||||
|
|
||||||
Usage
|
Usage
|
||||||
-----
|
-----
|
||||||
|
|
||||||
To get the Vault role running, you must do two things at a minimum:
|
To get the Vault role running, you must do two things at a minimum:
|
||||||
|
|
||||||
1. Assign the ``vault`` group to at least 1 node in your inventory
|
1. Assign the ``vault`` group to at least 1 node in your inventory
|
||||||
2. Change ``cert_management`` to be ``vault`` instead of ``script``
|
1. Change ``cert_management`` to be ``vault`` instead of ``script``
|
||||||
|
|
||||||
Nothing else is required, but customization is possible. Check
|
Nothing else is required, but customization is possible. Check
|
||||||
``roles/vault/defaults/main.yml`` for the different variables that can be
|
``roles/vault/defaults/main.yml`` for the different variables that can be
|
||||||
overridden, most common being ``vault_config``, ``vault_port``, and
|
overridden, most common being ``vault_config``, ``vault_port``, and
|
||||||
``vault_deployment_type``.
|
``vault_deployment_type``.
|
||||||
|
|
||||||
Also, if you intend to use a Root or Intermediate CA generated elsewhere,
|
As a result of the Vault role will be create separated Root CA for `etcd`,
|
||||||
you'll need to copy the certificate and key to the hosts in the vault group
|
`kubernetes` and `vault`. Also, if you intend to use a Root or Intermediate CA
|
||||||
prior to running the vault role. By default, they'll be located at
|
generated elsewhere, you'll need to copy the certificate and key to the hosts in the vault group prior to running the vault role. By default, they'll be located at:
|
||||||
``/etc/vault/ssl/ca.pem`` and ``/etc/vault/ssl/ca-key.pem``, respectively.
|
|
||||||
|
* vault:
|
||||||
|
* ``/etc/vault/ssl/ca.pem``
|
||||||
|
* ``/etc/vault/ssl/ca-key.pem``
|
||||||
|
* etcd:
|
||||||
|
* ``/etc/ssl/etcd/ssl/ca.pem``
|
||||||
|
* ``/etc/ssl/etcd/ssl/ca-key.pem``
|
||||||
|
* kubernetes:
|
||||||
|
* ``/etc/kubernetes/ssl/ca.pem``
|
||||||
|
* ``/etc/kubernetes/ssl/ca-key.pem``
|
||||||
|
|
||||||
Additional Notes:
|
Additional Notes:
|
||||||
|
|
||||||
@@ -77,7 +84,6 @@ Additional Notes:
|
|||||||
credentials are saved to ``/etc/vault/roles/<role>/``. The service will
|
credentials are saved to ``/etc/vault/roles/<role>/``. The service will
|
||||||
need to read in those credentials, if they want to interact with Vault.
|
need to read in those credentials, if they want to interact with Vault.
|
||||||
|
|
||||||
|
|
||||||
Potential Work
|
Potential Work
|
||||||
--------------
|
--------------
|
||||||
|
|
||||||
@@ -87,6 +93,3 @@ Potential Work
|
|||||||
- Add the ability to start temp Vault with Host, Rkt, or Docker
|
- Add the ability to start temp Vault with Host, Rkt, or Docker
|
||||||
- Add a dynamic way to change out the backend role creation during Bootstrap,
|
- Add a dynamic way to change out the backend role creation during Bootstrap,
|
||||||
so other services can be used (such as Consul)
|
so other services can be used (such as Consul)
|
||||||
- Segregate Server Cert generation from Auth Cert generation (separate CAs).
|
|
||||||
This work was partially started with the `auth_cert_backend` tasks, but would
|
|
||||||
need to be further applied to all roles (particularly Etcd and Kubernetes).
|
|
||||||
|
|||||||
64
docs/vsphere.md
Normal file
64
docs/vsphere.md
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
# vSphere cloud provider
|
||||||
|
|
||||||
|
Kubespray can be deployed with vSphere as Cloud provider. This feature supports
|
||||||
|
- Volumes
|
||||||
|
- Persistent Volumes
|
||||||
|
- Storage Classes and provisioning of volumes.
|
||||||
|
- vSphere Storage Policy Based Management for Containers orchestrated by Kubernetes.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
You first need to configure your vSphere environment by following the [official documentation](https://kubernetes.io/docs/getting-started-guides/vsphere/#vsphere-cloud-provider).
|
||||||
|
|
||||||
|
After this step you should have:
|
||||||
|
- UUID activated for each VM where Kubernetes will be deployed
|
||||||
|
- A vSphere account with required privileges
|
||||||
|
|
||||||
|
## Kubespray configuration
|
||||||
|
|
||||||
|
First, you must define the cloud provider in `inventory/sample/group_vars/all.yml` and set it to `vsphere`.
|
||||||
|
```yml
|
||||||
|
cloud_provider: vsphere
|
||||||
|
```
|
||||||
|
|
||||||
|
Then, in the same file, you need to declare your vCenter credentials following the description below.
|
||||||
|
|
||||||
|
| Variable | Required | Type | Choices | Default | Comment |
|
||||||
|
|------------------------------|----------|---------|----------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||||
|
| vsphere_vcenter_ip | TRUE | string | | | IP/URL of the vCenter |
|
||||||
|
| vsphere_vcenter_port | TRUE | integer | | | Port of the vCenter API. Commonly 443 |
|
||||||
|
| vsphere_insecure | TRUE | integer | 1, 0 | | set to 1 if the host above uses a self-signed cert |
|
||||||
|
| vsphere_user | TRUE | string | | | User name for vCenter with required privileges |
|
||||||
|
| vsphere_password | TRUE | string | | | Password for vCenter |
|
||||||
|
| vsphere_datacenter | TRUE | string | | | Datacenter name to use |
|
||||||
|
| vsphere_datastore | TRUE | string | | | Datastore name to use |
|
||||||
|
| vsphere_working_dir | TRUE | string | | | Working directory from the view "VMs and template" in the vCenter where VM are placed |
|
||||||
|
| vsphere_scsi_controller_type | TRUE | string | buslogic, pvscsi, parallel | pvscsi | SCSI controller name. Commonly "pvscsi". |
|
||||||
|
| vsphere_vm_uuid | FALSE | string | | | VM Instance UUID of virtual machine that host K8s master. Can be retrieved from instanceUuid property in VmConfigInfo, or as vc.uuid in VMX file or in `/sys/class/dmi/id/product_serial` (Optional, only used for Kubernetes <= 1.9.2) |
|
||||||
|
| vsphere_public_network | FALSE | string | | Blank | Name of the network the VMs are joined to |
|
||||||
|
| vsphere_resource_pool | FALSE | string | | Blank | Name of the Resource pool where the VMs are located (Optional, only used for Kubernetes >= 1.9.2) |
|
||||||
|
|
||||||
|
Example configuration
|
||||||
|
|
||||||
|
```yml
|
||||||
|
vsphere_vcenter_ip: "myvcenter.domain.com"
|
||||||
|
vsphere_vcenter_port: 443
|
||||||
|
vsphere_insecure: 1
|
||||||
|
vsphere_user: "k8s@vsphere.local"
|
||||||
|
vsphere_password: "K8s_admin"
|
||||||
|
vsphere_datacenter: "DATACENTER_name"
|
||||||
|
vsphere_datastore: "DATASTORE_name"
|
||||||
|
vsphere_working_dir: "Docker_hosts"
|
||||||
|
vsphere_scsi_controller_type: "pvscsi"
|
||||||
|
vsphere_resource_pool: "K8s-Pool"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Deployment
|
||||||
|
|
||||||
|
Once the configuration is set, you can execute the playbook again to apply the new configuration
|
||||||
|
```
|
||||||
|
cd kubespray
|
||||||
|
ansible-playbook -i inventory/sample/hosts.ini -b -v cluster.yml
|
||||||
|
```
|
||||||
|
|
||||||
|
You'll find some useful examples [here](https://github.com/kubernetes/kubernetes/tree/master/examples/volumes/vsphere) to test your configuration.
|
||||||
98
docs/weave.md
Normal file
98
docs/weave.md
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
Weave
|
||||||
|
=======
|
||||||
|
|
||||||
|
Weave 2.0.1 is supported by kubespray
|
||||||
|
|
||||||
|
Weave uses [**consensus**](https://www.weave.works/docs/net/latest/ipam/#consensus) mode (default mode) and [**seed**](https://www.weave.works/docs/net/latest/ipam/#seed) mode.
|
||||||
|
|
||||||
|
`Consensus` mode is best to use on static size cluster and `seed` mode is best to use on dynamic size cluster
|
||||||
|
|
||||||
|
Weave encryption is supported for all communication
|
||||||
|
|
||||||
|
* To use Weave encryption, specify a strong password (if no password is set, no encryption is used)
|
||||||
|
|
||||||
|
```
|
||||||
|
# In file ./inventory/sample/group_vars/k8s-cluster.yml
|
||||||
|
weave_password: EnterPasswordHere
|
||||||
|
```
|
||||||
|
|
||||||
|
This password is used to set an environment variable inside weave container.
|
||||||
|
|
||||||
|
Weave is deployed by kubespray using a daemonSet
|
||||||
|
|
||||||
|
* Check the status of Weave containers
|
||||||
|
|
||||||
|
```
|
||||||
|
# From client
|
||||||
|
kubectl -n kube-system get pods | grep weave
|
||||||
|
# output
|
||||||
|
weave-net-50wd2 2/2 Running 0 2m
|
||||||
|
weave-net-js9rb 2/2 Running 0 2m
|
||||||
|
```
|
||||||
|
There must be as many pods as nodes (here kubernetes have 2 nodes so there are 2 weave pods).
|
||||||
|
|
||||||
|
* Check status of weave (connection,encryption ...) for each node
|
||||||
|
|
||||||
|
```
|
||||||
|
# On nodes
|
||||||
|
curl http://127.0.0.1:6784/status
|
||||||
|
# output on node1
|
||||||
|
Version: 2.0.1 (up to date; next check at 2017/08/01 13:51:34)
|
||||||
|
|
||||||
|
Service: router
|
||||||
|
Protocol: weave 1..2
|
||||||
|
Name: fa:16:3e:b3:d6:b2(node1)
|
||||||
|
Encryption: enabled
|
||||||
|
PeerDiscovery: enabled
|
||||||
|
Targets: 2
|
||||||
|
Connections: 2 (1 established, 1 failed)
|
||||||
|
Peers: 2 (with 2 established connections)
|
||||||
|
TrustedSubnets: none
|
||||||
|
|
||||||
|
Service: ipam
|
||||||
|
Status: ready
|
||||||
|
Range: 10.233.64.0/18
|
||||||
|
DefaultSubnet: 10.233.64.0/18
|
||||||
|
```
|
||||||
|
|
||||||
|
* Check parameters of weave for each node
|
||||||
|
|
||||||
|
```
|
||||||
|
# On nodes
|
||||||
|
ps -aux | grep weaver
|
||||||
|
# output on node1 (here its use seed mode)
|
||||||
|
root 8559 0.2 3.0 365280 62700 ? Sl 08:25 0:00 /home/weave/weaver --name=fa:16:3e:b3:d6:b2 --port=6783 --datapath=datapath --host-root=/host --http-addr=127.0.0.1:6784 --status-addr=0.0.0.0:6782 --docker-api= --no-dns --db-prefix=/weavedb/weave-net --ipalloc-range=10.233.64.0/18 --nickname=node1 --ipalloc-init seed=fa:16:3e:b3:d6:b2,fa:16:3e:f0:50:53 --conn-limit=30 --expect-npc 192.168.208.28 192.168.208.19
|
||||||
|
```
|
||||||
|
|
||||||
|
### Consensus mode (default mode)
|
||||||
|
|
||||||
|
This mode is best to use on static size cluster
|
||||||
|
|
||||||
|
### Seed mode
|
||||||
|
|
||||||
|
This mode is best to use on dynamic size cluster
|
||||||
|
|
||||||
|
The seed mode also allows multi-cloud and hybrid on-premise/cloud cluster deployment.
|
||||||
|
|
||||||
|
* Switch from consensus mode to seed mode
|
||||||
|
|
||||||
|
```
|
||||||
|
# In file ./inventory/sample/group_vars/k8s-cluster.yml
|
||||||
|
weave_mode_seed: true
|
||||||
|
```
|
||||||
|
|
||||||
|
These two variables are only used when `weave_mode_seed` is set to `true` (**/!\ do not manually change these values**)
|
||||||
|
|
||||||
|
```
|
||||||
|
# In file ./inventory/sample/group_vars/k8s-cluster.yml
|
||||||
|
weave_seed: uninitialized
|
||||||
|
weave_peers: uninitialized
|
||||||
|
```
|
||||||
|
|
||||||
|
The first variable, `weave_seed`, contains the initial nodes of the weave network
|
||||||
|
|
||||||
|
The second variable, `weave_peers`, saves the IPs of all nodes joined to the weave network
|
||||||
|
|
||||||
|
These two variables are used to connect a new node to the weave network. The new node needs to know the first nodes (seed) and the list of IPs of all nodes.
|
||||||
|
|
||||||
|
To reset these variables and reset the weave network set them to `uninitialized`
|
||||||
54
extra_playbooks/build-cephfs-provisioner.yml
Normal file
54
extra_playbooks/build-cephfs-provisioner.yml
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
---
|
||||||
|
|
||||||
|
- hosts: localhost
|
||||||
|
tasks:
|
||||||
|
- name: CephFS Provisioner | Install pip packages
|
||||||
|
pip:
|
||||||
|
name: "{{ item.name }}"
|
||||||
|
version: "{{ item.version }}"
|
||||||
|
state: "{{ item.state }}"
|
||||||
|
with_items:
|
||||||
|
- { state: "present", name: "docker", version: "2.7.0" }
|
||||||
|
- { state: "present", name: "docker-compose", version: "1.18.0" }
|
||||||
|
|
||||||
|
- name: CephFS Provisioner | Check Go version
|
||||||
|
shell: |
|
||||||
|
go version
|
||||||
|
ignore_errors: yes
|
||||||
|
register: go_version_result
|
||||||
|
|
||||||
|
- name: CephFS Provisioner | Install Go 1.9
|
||||||
|
shell: |
|
||||||
|
add-apt-repository -y ppa:gophers/archive
|
||||||
|
apt-get update
|
||||||
|
apt-get install -y golang-1.9
|
||||||
|
ln -fs /usr/lib/go-1.9/bin/* /usr/local/bin/
|
||||||
|
when: 'go_version_result.rc != 0 or "go version go1.9" not in go_version_result.stdout'
|
||||||
|
|
||||||
|
- name: CephFS Provisioner | Check if image exists
|
||||||
|
shell: |
|
||||||
|
docker image list | grep 'cephfs-provisioner'
|
||||||
|
ignore_errors: yes
|
||||||
|
register: check_image_result
|
||||||
|
|
||||||
|
- block:
|
||||||
|
- name: CephFS Provisioner | Clone repo
|
||||||
|
git:
|
||||||
|
repo: https://github.com/kubernetes-incubator/external-storage.git
|
||||||
|
dest: "~/go/src/github.com/kubernetes-incubator"
|
||||||
|
version: 92295a30
|
||||||
|
clone: no
|
||||||
|
update: yes
|
||||||
|
|
||||||
|
- name: CephFS Provisioner | Build image
|
||||||
|
shell: |
|
||||||
|
cd ~/go/src/github.com/kubernetes-incubator/external-storage
|
||||||
|
REGISTRY=quay.io/kubespray/ VERSION=92295a30 make ceph/cephfs
|
||||||
|
|
||||||
|
- name: CephFS Provisioner | Push image
|
||||||
|
docker_image:
|
||||||
|
name: quay.io/kubespray/cephfs-provisioner:92295a30
|
||||||
|
push: yes
|
||||||
|
retries: 10
|
||||||
|
|
||||||
|
when: check_image_result.rc != 0
|
||||||
1
extra_playbooks/inventory
Symbolic link
1
extra_playbooks/inventory
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../inventory
|
||||||
1
extra_playbooks/roles
Symbolic link
1
extra_playbooks/roles
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../roles
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user