mirror of
https://github.com/kubernetes-sigs/kubespray.git
synced 2026-02-04 02:58:17 -03:30
Compare commits
859 Commits
(Only the bare SHA1 column of the commit list survived this snapshot; the 859 entries ran from acae0fe4a3 at the top to 71347322d6 at the bottom, with the author and date cells empty.)
.github/ISSUE_TEMPLATE.md (vendored), new file, 47 lines
@@ -0,0 +1,47 @@

<!-- Thanks for filing an issue! Before hitting the button, please answer these questions.-->

**Is this a BUG REPORT or FEATURE REQUEST?** (choose one):

<!--
If this is a BUG REPORT, please:
  - Fill in as much of the template below as you can. If you leave out
    information, we can't help you as well.

If this is a FEATURE REQUEST, please:
  - Describe *in detail* the feature/behavior/change you'd like to see.

In both cases, be ready for followup questions, and please respond in a timely
manner. If we can't reproduce a bug or think a feature already exists, we
might close your issue. If we're wrong, PLEASE feel free to reopen it and
explain why.
-->

**Environment**:
- **Cloud provider or hardware configuration:**

- **OS (`printf "$(uname -srm)\n$(cat /etc/os-release)\n"`):**

- **Version of Ansible** (`ansible --version`):

**Kargo version (commit) (`git rev-parse --short HEAD`):**

**Network plugin used**:

**Copy of your inventory file:**

**Command used to invoke ansible**:

**Output of ansible run**:
<!-- We recommend using snippets services like https://gist.github.com/ etc. -->

**Anything else we need to know**:
<!-- By running scripts/collect-info.yaml you can get a lot of useful information.
The script can be started by:
ansible-playbook -i <inventory_file_path> -u <ssh_user> -e ansible_ssh_user=<ssh_user> -b --become-user=root -e dir=`pwd` scripts/collect-info.yaml
(If you are using CoreOS, remember to add '-e ansible_python_interpreter=/opt/bin/python'.)
After running this command you can find the logs in `pwd`/logs.tar.gz. You can even upload the entire file somewhere and paste the link here.-->
.gitignore (vendored), 13 insertions
@@ -1,5 +1,18 @@
 .vagrant
 *.retry
 inventory/vagrant_ansible_inventory
+inventory/group_vars/fake_hosts.yml
+inventory/host_vars/
 temp
 .idea
+.tox
+.cache
+*.egg-info
+*.pyc
+*.pyo
+*.tfstate
+*.tfstate.backup
+**/*.sw[pon]
+/ssh-bastion.conf
+**/*.sw[pon]
+vagrant/
.gitlab-ci.yml, new file, 608 lines
@@ -0,0 +1,608 @@

stages:
  - moderator
  - unit-tests
  - deploy-gce-part1
  - deploy-gce-part2
  - deploy-gce-special

variables:
  FAILFASTCI_NAMESPACE: 'kargo-ci'
#  DOCKER_HOST: tcp://localhost:2375
  ANSIBLE_FORCE_COLOR: "true"

# asia-east1-a
# asia-northeast1-a
# europe-west1-b
# us-central1-a
# us-east1-b
# us-west1-a

before_script:
  - pip install ansible==2.2.1.0
  - pip install netaddr
  - pip install apache-libcloud==0.20.1
  - pip install boto==2.9.0
  - mkdir -p /.ssh
  - cp tests/ansible.cfg .

.job: &job
  tags:
    - kubernetes
    - docker
  image: quay.io/ant31/kargo:master

.docker_service: &docker_service
  services:
    - docker:dind

.create_cluster: &create_cluster
  <<: *job
  <<: *docker_service

.gce_variables: &gce_variables
  GCE_USER: travis
  SSH_USER: $GCE_USER
  TEST_ID: "$CI_PIPELINE_ID-$CI_BUILD_ID"
  CONTAINER_ENGINE: docker
  PRIVATE_KEY: $GCE_PRIVATE_KEY
  GS_ACCESS_KEY_ID: $GS_KEY
  GS_SECRET_ACCESS_KEY: $GS_SECRET
  CLOUD_MACHINE_TYPE: "g1-small"
  ANSIBLE_KEEP_REMOTE_FILES: "1"
  ANSIBLE_CONFIG: ./tests/ansible.cfg
  BOOTSTRAP_OS: none
  DOWNLOAD_LOCALHOST: "false"
  DOWNLOAD_RUN_ONCE: "false"
  IDEMPOT_CHECK: "false"
  RESET_CHECK: "false"
  UPGRADE_TEST: "false"
  RESOLVCONF_MODE: docker_dns
  LOG_LEVEL: "-vv"
  ETCD_DEPLOYMENT: "docker"
  KUBELET_DEPLOYMENT: "docker"
  VAULT_DEPLOYMENT: "docker"
  WEAVE_CPU_LIMIT: "100m"
  MAGIC: "ci check this"

.gce: &gce
  <<: *job
  <<: *docker_service
  cache:
    key: "$CI_BUILD_REF_NAME"
    paths:
      - downloads/
      - $HOME/.cache
  before_script:
    - docker info
    - pip install ansible==2.2.1.0
    - pip install netaddr
    - pip install apache-libcloud==0.20.1
    - pip install boto==2.9.0
    - mkdir -p /.ssh
    - mkdir -p $HOME/.ssh
    - echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
    - echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
    - echo $GCE_CREDENTIALS > $HOME/.ssh/gce.json
    - chmod 400 $HOME/.ssh/id_rsa
    - ansible-playbook --version
    - export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
  script:
    - pwd
    - ls
    - echo ${PWD}
    - >
      ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local
      ${LOG_LEVEL}
      -e cloud_image=${CLOUD_IMAGE}
      -e cloud_region=${CLOUD_REGION}
      -e gce_credentials_file=${HOME}/.ssh/gce.json
      -e gce_project_id=${GCE_PROJECT_ID}
      -e gce_service_account_email=${GCE_ACCOUNT}
      -e cloud_machine_type=${CLOUD_MACHINE_TYPE}
      -e inventory_path=${PWD}/inventory/inventory.ini
      -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
      -e mode=${CLUSTER_MODE}
      -e test_id=${TEST_ID}

    # Check out latest tag if testing upgrade
    # Uncomment when gitlab kargo repo has tags
    #- test "${UPGRADE_TEST}" != "false" && git fetch --all && git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
    - test "${UPGRADE_TEST}" != "false" && git checkout 031cf565ec3ccd3ebbe80eeef3454c3780e5c598 && pip install ansible==2.2.0

    # Create cluster
    - >
      ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
      ${SSH_ARGS}
      ${LOG_LEVEL}
      -e ansible_python_interpreter=${PYPATH}
      -e ansible_ssh_user=${SSH_USER}
      -e bootstrap_os=${BOOTSTRAP_OS}
      -e cert_management=${CERT_MGMT:-script}
      -e cloud_provider=gce
      -e deploy_netchecker=true
      -e download_localhost=${DOWNLOAD_LOCALHOST}
      -e download_run_once=${DOWNLOAD_RUN_ONCE}
      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
      -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
      -e local_release_dir=${PWD}/downloads
      -e resolvconf_mode=${RESOLVCONF_MODE}
      -e vault_deployment_type=${VAULT_DEPLOYMENT}
      --limit "all:!fake_hosts"
      cluster.yml

    # Repeat deployment if testing upgrade
    - >
      if [ "${UPGRADE_TEST}" != "false" ]; then
      test "${UPGRADE_TEST}" == "basic" && PLAYBOOK="cluster.yml";
      test "${UPGRADE_TEST}" == "graceful" && PLAYBOOK="upgrade-cluster.yml";
      pip install ansible==2.2.1.0;
      git checkout "${CI_BUILD_REF}";
      ansible-playbook -i inventory/inventory.ini -b --become-user=root --private-key=${HOME}/.ssh/id_rsa -u $SSH_USER
      ${SSH_ARGS}
      ${LOG_LEVEL}
      -e ansible_python_interpreter=${PYPATH}
      -e ansible_ssh_user=${SSH_USER}
      -e bootstrap_os=${BOOTSTRAP_OS}
      -e cloud_provider=gce
      -e deploy_netchecker=true
      -e download_localhost=${DOWNLOAD_LOCALHOST}
      -e download_run_once=${DOWNLOAD_RUN_ONCE}
      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
      -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
      -e local_release_dir=${PWD}/downloads
      -e resolvconf_mode=${RESOLVCONF_MODE}
      -e weave_cpu_requests=${WEAVE_CPU_LIMIT}
      -e weave_cpu_limit=${WEAVE_CPU_LIMIT}
      --limit "all:!fake_hosts"
      $PLAYBOOK;
      fi

    # Test cases
    ## Test master API
    - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/010_check-apiserver.yml $LOG_LEVEL

    ## Ping between 2 pods
    - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/030_check-network.yml $LOG_LEVEL

    ## Advanced DNS checks
    - ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root --limit "all:!fake_hosts" tests/testcases/040_check-network-adv.yml $LOG_LEVEL

    ## Idempotency checks 1/5 (repeat deployment)
    - >
      if [ "${IDEMPOT_CHECK}" = "true" ]; then
      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
      --private-key=${HOME}/.ssh/id_rsa
      -e bootstrap_os=${BOOTSTRAP_OS}
      -e ansible_python_interpreter=${PYPATH}
      -e download_localhost=${DOWNLOAD_LOCALHOST}
      -e download_run_once=${DOWNLOAD_RUN_ONCE}
      -e deploy_netchecker=true
      -e resolvconf_mode=${RESOLVCONF_MODE}
      -e local_release_dir=${PWD}/downloads
      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
      --limit "all:!fake_hosts"
      cluster.yml;
      fi

    ## Idempotency checks 2/5 (Advanced DNS checks)
    - >
      if [ "${IDEMPOT_CHECK}" = "true" ]; then
      ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
      -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
      --limit "all:!fake_hosts"
      tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
      fi

    ## Idempotency checks 3/5 (reset deployment)
    - >
      if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
      --private-key=${HOME}/.ssh/id_rsa
      -e bootstrap_os=${BOOTSTRAP_OS}
      -e ansible_python_interpreter=${PYPATH}
      -e reset_confirmation=yes
      --limit "all:!fake_hosts"
      reset.yml;
      fi

    ## Idempotency checks 4/5 (redeploy after reset)
    - >
      if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
      ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
      -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
      --private-key=${HOME}/.ssh/id_rsa
      -e bootstrap_os=${BOOTSTRAP_OS}
      -e ansible_python_interpreter=${PYPATH}
      -e download_localhost=${DOWNLOAD_LOCALHOST}
      -e download_run_once=${DOWNLOAD_RUN_ONCE}
      -e deploy_netchecker=true
      -e resolvconf_mode=${RESOLVCONF_MODE}
      -e local_release_dir=${PWD}/downloads
      -e etcd_deployment_type=${ETCD_DEPLOYMENT}
      -e kubelet_deployment_type=${KUBELET_DEPLOYMENT}
      --limit "all:!fake_hosts"
      cluster.yml;
      fi

    ## Idempotency checks 5/5 (Advanced DNS checks)
    - >
      if [ "${IDEMPOT_CHECK}" = "true" AND "${RESET_CHECK}" = "true" ]; then
      ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH}
      -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root
      --limit "all:!fake_hosts"
      tests/testcases/040_check-network-adv.yml $LOG_LEVEL;
      fi
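    # NOTE (annotation, not in the original file): `AND` is not an operator the
    # POSIX `[` / `test` builtin accepts, so the three `RESET_CHECK`-gated
    # conditionals above fail with "[: AND: binary operator expected" instead of
    # running. One working form, as a sketch, chains two separate tests:
    #   if [ "${IDEMPOT_CHECK}" = "true" ] && [ "${RESET_CHECK}" = "true" ]; then ...; fi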
  after_script:
    - >
      ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
      -e mode=${CLUSTER_MODE}
      -e test_id=${TEST_ID}
      -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
      -e gce_project_id=${GCE_PROJECT_ID}
      -e gce_service_account_email=${GCE_ACCOUNT}
      -e gce_credentials_file=${HOME}/.ssh/gce.json
      -e cloud_image=${CLOUD_IMAGE}
      -e inventory_path=${PWD}/inventory/inventory.ini
      -e cloud_region=${CLOUD_REGION}

# Test matrix. Leave the comments for markup scripts.
.coreos_calico_sep_variables: &coreos_calico_sep_variables
# stage: deploy-gce-part1
  KUBE_NETWORK_PLUGIN: calico
  CLOUD_IMAGE: coreos-stable-1298-6-0-v20170315
  CLOUD_REGION: us-west1-b
  CLUSTER_MODE: separate
  BOOTSTRAP_OS: coreos
  RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12

.ubuntu_canal_ha_variables: &ubuntu_canal_ha_variables
# stage: deploy-gce-part1
  KUBE_NETWORK_PLUGIN: canal
  CLOUD_IMAGE: ubuntu-1604-xenial
  CLOUD_REGION: europe-west1-b
  CLOUD_MACHINE_TYPE: "n1-standard-2"
  UPGRADE_TEST: "basic"
  CLUSTER_MODE: ha
  UPGRADE_TEST: "graceful"

.rhel7_weave_variables: &rhel7_weave_variables
# stage: deploy-gce-part1
  KUBE_NETWORK_PLUGIN: weave
  CLOUD_IMAGE: rhel-7
  CLOUD_REGION: europe-west1-b
  CLUSTER_MODE: default

.centos7_flannel_variables: &centos7_flannel_variables
# stage: deploy-gce-part2
  KUBE_NETWORK_PLUGIN: flannel
  CLOUD_IMAGE: centos-7
  CLOUD_REGION: us-west1-a
  CLUSTER_MODE: default

.debian8_calico_variables: &debian8_calico_variables
# stage: deploy-gce-part2
  KUBE_NETWORK_PLUGIN: calico
  CLOUD_IMAGE: debian-8-kubespray
  CLOUD_REGION: us-central1-b
  CLUSTER_MODE: default

.coreos_canal_variables: &coreos_canal_variables
# stage: deploy-gce-part2
  KUBE_NETWORK_PLUGIN: canal
  CLOUD_IMAGE: coreos-stable-1298-6-0-v20170315
  CLOUD_REGION: us-east1-b
  CLUSTER_MODE: default
  BOOTSTRAP_OS: coreos
  IDEMPOT_CHECK: "true"
  RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12

.rhel7_canal_sep_variables: &rhel7_canal_sep_variables
# stage: deploy-gce-special
  KUBE_NETWORK_PLUGIN: canal
  CLOUD_IMAGE: rhel-7
  CLOUD_REGION: us-east1-b
  CLUSTER_MODE: separate

.ubuntu_weave_sep_variables: &ubuntu_weave_sep_variables
# stage: deploy-gce-special
  KUBE_NETWORK_PLUGIN: weave
  CLOUD_IMAGE: ubuntu-1604-xenial
  CLOUD_REGION: us-central1-b
  CLUSTER_MODE: separate
  IDEMPOT_CHECK: "false"

.centos7_calico_ha_variables: &centos7_calico_ha_variables
# stage: deploy-gce-special
  KUBE_NETWORK_PLUGIN: calico
  DOWNLOAD_LOCALHOST: "true"
  DOWNLOAD_RUN_ONCE: "true"
  CLOUD_IMAGE: centos-7
  CLOUD_REGION: europe-west1-b
  CLUSTER_MODE: ha-scale
  IDEMPOT_CHECK: "true"

.coreos_alpha_weave_ha_variables: &coreos_alpha_weave_ha_variables
# stage: deploy-gce-special
  KUBE_NETWORK_PLUGIN: weave
  CLOUD_IMAGE: coreos-alpha-1325-0-0-v20170216
  CLOUD_REGION: us-west1-a
  CLUSTER_MODE: ha-scale
  BOOTSTRAP_OS: coreos
  RESOLVCONF_MODE: host_resolvconf # This is required as long as the CoreOS stable channel uses docker < 1.12

.ubuntu_rkt_sep_variables: &ubuntu_rkt_sep_variables
# stage: deploy-gce-part1
  KUBE_NETWORK_PLUGIN: flannel
  CLOUD_IMAGE: ubuntu-1604-xenial
  CLOUD_REGION: us-central1-b
  CLUSTER_MODE: separate
  ETCD_DEPLOYMENT: rkt
  KUBELET_DEPLOYMENT: rkt

.ubuntu_vault_sep_variables: &ubuntu_vault_sep_variables
# stage: deploy-gce-part1
  KUBE_NETWORK_PLUGIN: canal
  CERT_MGMT: vault
  CLOUD_IMAGE: ubuntu-1604-xenial
  CLOUD_REGION: us-central1-b
  CLUSTER_MODE: separate

# Builds for PRs only (premoderated by unit-tests step) and triggers (auto)
coreos-calico-sep:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *coreos_calico_sep_variables
  when: on_success
  except: ['triggers']
  only: [/^pr-.*$/]

coreos-calico-sep-triggers:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *coreos_calico_sep_variables
  when: on_success
  only: ['triggers']

centos7-flannel:
  stage: deploy-gce-part2
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *centos7_flannel_variables
  when: on_success
  except: ['triggers']
  only: [/^pr-.*$/]

centos7-flannel-triggers:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *centos7_flannel_variables
  when: on_success
  only: ['triggers']

ubuntu-weave-sep:
  stage: deploy-gce-special
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *ubuntu_weave_sep_variables
  when: on_success
  except: ['triggers']
  only: [/^pr-.*$/]

ubuntu-weave-sep-triggers:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *ubuntu_weave_sep_variables
  when: on_success
  only: ['triggers']

# More builds for PRs/merges (manual) and triggers (auto)
ubuntu-canal-ha:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *ubuntu_canal_ha_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

ubuntu-canal-ha-triggers:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *ubuntu_canal_ha_variables
  when: on_success
  only: ['triggers']

rhel7-weave:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *rhel7_weave_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

rhel7-weave-triggers:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *rhel7_weave_variables
  when: on_success
  only: ['triggers']

debian8-calico-upgrade:
  stage: deploy-gce-part2
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *debian8_calico_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

debian8-calico-triggers:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *debian8_calico_variables
  when: on_success
  only: ['triggers']

coreos-canal:
  stage: deploy-gce-part2
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *coreos_canal_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

coreos-canal-triggers:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *coreos_canal_variables
  when: on_success
  only: ['triggers']

rhel7-canal-sep:
  stage: deploy-gce-special
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *rhel7_canal_sep_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/,]

rhel7-canal-sep-triggers:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *rhel7_canal_sep_variables
  when: on_success
  only: ['triggers']

centos7-calico-ha:
  stage: deploy-gce-special
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *centos7_calico_ha_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

centos7-calico-ha-triggers:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *centos7_calico_ha_variables
  when: on_success
  only: ['triggers']

# no triggers yet https://github.com/kubernetes-incubator/kargo/issues/613
coreos-alpha-weave-ha:
  stage: deploy-gce-special
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *coreos_alpha_weave_ha_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

ubuntu-rkt-sep:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *ubuntu_rkt_sep_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

ubuntu-vault-sep:
  stage: deploy-gce-part1
  <<: *job
  <<: *gce
  variables:
    <<: *gce_variables
    <<: *ubuntu_vault_sep_variables
  when: manual
  except: ['triggers']
  only: ['master', /^pr-.*$/]

# Premoderated with manual actions
ci-authorized:
  <<: *job
  stage: moderator
  before_script:
    - apt-get -y install jq
  script:
    - /bin/sh scripts/premoderator.sh
  except: ['triggers', 'master']

syntax-check:
  <<: *job
  stage: unit-tests
  script:
    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root cluster.yml -vvv --syntax-check
    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root upgrade-cluster.yml -vvv --syntax-check
    - ansible-playbook -i inventory/local-tests.cfg -u root -e ansible_ssh_user=root -b --become-user=root reset.yml -vvv --syntax-check
  except: ['triggers', 'master']

tox-inventory-builder:
  stage: unit-tests
  <<: *job
  script:
    - pip install tox
    - cd contrib/inventory_builder && tox
  when: manual
  except: ['triggers', 'master']
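The `.gitlab-ci.yml` above leans on three YAML features: hidden jobs (a leading dot keeps a key from running as a job), anchors (`&name`), and merge keys (`<<: *name`) that paste an anchored mapping into another one. A minimal sketch of the pattern, using illustrative job and variable names that are not from the repo:

```yaml
.defaults: &defaults     # hidden template job; the leading dot prevents it from running
  image: alpine:3.5      # illustrative image, not from the repo
  tags:
    - docker

.common_env: &common_env
  LOG_LEVEL: "-vv"

smoke-test:
  <<: *defaults          # merge key copies image/tags from the template
  variables:
    <<: *common_env      # the same trick composes variable groups
    CLUSTER_MODE: default
  script:
    - echo "$LOG_LEVEL $CLUSTER_MODE"
```

Merging two anchors into one mapping, as `.create_cluster` and the deploy jobs do with repeated `<<:` keys, can also be written as a single list-valued merge, `<<: [*job, *gce]`.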
.travis.yml, deleted file, 149 lines removed
@@ -1,149 +0,0 @@

sudo: false

git:
  depth: 5

env:
  global:
    GCE_USER=travis
    SSH_USER=$GCE_USER
    TEST_ID=$TRAVIS_JOB_NUMBER
    CONTAINER_ENGINE=docker
    PRIVATE_KEY=$GCE_PRIVATE_KEY
    ANSIBLE_KEEP_REMOTE_FILES=1
  matrix:
    # Debian Jessie
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=europe-west1-b
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=us-central1-c
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=us-east1-d

    # Centos 7
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=centos-7-sudo
      CLOUD_REGION=asia-east1-c
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=centos-7-sudo
      CLOUD_REGION=europe-west1-b
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=centos-7-sudo
      CLOUD_REGION=us-central1-c

    # Redhat 7
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=us-east1-d
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=asia-east1-c
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=rhel-7-sudo
      CLOUD_REGION=europe-west1-b

    # Ubuntu 16.04
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=us-central1-c
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=us-east1-d
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=asia-east1-c

    # Ubuntu 15.10
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=ubuntu-1510-wily
      CLOUD_REGION=europe-west1-b
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=ubuntu-1510-wily
      CLOUD_REGION=us-central1-a
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=ubuntu-1510-wily
      CLOUD_REGION=us-east1-d

before_install:
  # Install Ansible.
  - pip install --user boto -U
  - pip install --user ansible
  - pip install --user netaddr
  - pip install --user apache-libcloud

cache:
  - directories:
    - $HOME/.cache/pip
    - $HOME/.local

before_script:
  - echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
  - mkdir -p $HOME/.ssh
  - echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
  - echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
  - chmod 400 $HOME/.ssh/id_rsa
  - chmod 755 $HOME/.local/bin/ansible-playbook
  - $HOME/.local/bin/ansible-playbook --version
  - cp tests/ansible.cfg .
  # - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"
  ## Configure ansible deployment logs to be collected as an artifact. Enable when GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
  # - $HOME/.local/bin/ansible-playbook -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scritps/configure-logs.yaml

script:
  - >
    $HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts -c local $LOG_LEVEL
    -e test_id=${TEST_ID}
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
    -e gce_project_id=${GCE_PROJECT_ID}
    -e gce_service_account_email=${GCE_ACCOUNT}
    -e gce_pem_file=${HOME}/.ssh/gce
    -e cloud_image=${CLOUD_IMAGE}
    -e inventory_path=${PWD}/inventory/inventory.ini
    -e cloud_region=${CLOUD_REGION}

  # Create cluster
  - "$HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} cluster.yml"

  # Test cases
  ## Test Master API
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/testcases/010_check-apiserver.yml $LOG_LEVEL
  ## Create a POD
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/020_check-create-pod.yml $LOG_LEVEL
  ## Ping between 2 pods
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
  ## Collect env info, enable it once GCS configured, see https://docs.travis-ci.com/user/deployment/gcs
  # - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root scritps/collect-info.yaml

after_script:
  - >
    $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
    -e test_id=${TEST_ID}
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
    -e gce_project_id=${GCE_PROJECT_ID}
    -e gce_service_account_email=${GCE_ACCOUNT}
    -e gce_pem_file=${HOME}/.ssh/gce
    -e cloud_image=${CLOUD_IMAGE}
    -e inventory_path=${PWD}/inventory/inventory.ini
    -e cloud_region=${CLOUD_REGION}
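Both the GitLab and Travis configurations pass the SSH private key through a CI secret variable and decode it back to a file at the start of every job. A sketch of the full round trip, assuming the secret was produced by base64-encoding the key file (the encoding step is an inference; only the decoding half appears in the configs above):

```yaml
# One-time, on a workstation (GNU coreutils; -w0 disables line wrapping):
#   base64 -w0 < ~/.ssh/ci_deploy_key
# Store the output as a protected CI variable named PRIVATE_KEY.
before_script:
  - mkdir -p $HOME/.ssh
  - echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa   # decode back into a key file
  - chmod 400 $HOME/.ssh/id_rsa                         # ssh refuses keys with loose permissions
```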
.travis.yml.bak, new file, 161 lines
@@ -0,0 +1,161 @@

sudo: required

services:
  - docker

git:
  depth: 5

env:
  global:
    GCE_USER=travis
    SSH_USER=$GCE_USER
    TEST_ID=$TRAVIS_JOB_NUMBER
    CONTAINER_ENGINE=docker
    PRIVATE_KEY=$GCE_PRIVATE_KEY
    GS_ACCESS_KEY_ID=$GS_KEY
    GS_SECRET_ACCESS_KEY=$GS_SECRET
    ANSIBLE_KEEP_REMOTE_FILES=1
    CLUSTER_MODE=default
    BOOTSTRAP_OS=none
  matrix:
    # Debian Jessie
    - >-
      KUBE_NETWORK_PLUGIN=canal
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=asia-east1-a
      CLUSTER_MODE=ha
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=debian-8-kubespray
      CLOUD_REGION=europe-west1-c
      CLUSTER_MODE=default

    # Centos 7
    - >-
      KUBE_NETWORK_PLUGIN=flannel
      CLOUD_IMAGE=centos-7
      CLOUD_REGION=asia-northeast1-c
      CLUSTER_MODE=default
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=centos-7
      CLOUD_REGION=us-central1-b
      CLUSTER_MODE=ha

    # Redhat 7
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=rhel-7
      CLOUD_REGION=us-east1-c
      CLUSTER_MODE=default

    # CoreOS stable
    #- >-
    #  KUBE_NETWORK_PLUGIN=weave
    #  CLOUD_IMAGE=coreos-stable
    #  CLOUD_REGION=europe-west1-b
    #  CLUSTER_MODE=ha
    #  BOOTSTRAP_OS=coreos
    - >-
      KUBE_NETWORK_PLUGIN=canal
      CLOUD_IMAGE=coreos-stable
      CLOUD_REGION=us-west1-b
      CLUSTER_MODE=default
      BOOTSTRAP_OS=coreos

    # Extra cases for separated roles
    - >-
      KUBE_NETWORK_PLUGIN=canal
      CLOUD_IMAGE=rhel-7
      CLOUD_REGION=asia-northeast1-b
      CLUSTER_MODE=separate
    - >-
      KUBE_NETWORK_PLUGIN=weave
      CLOUD_IMAGE=ubuntu-1604-xenial
      CLOUD_REGION=europe-west1-d
      CLUSTER_MODE=separate
    - >-
      KUBE_NETWORK_PLUGIN=calico
      CLOUD_IMAGE=coreos-stable
      CLOUD_REGION=us-central1-f
      CLUSTER_MODE=separate
      BOOTSTRAP_OS=coreos

matrix:
  allow_failures:
    - env: KUBE_NETWORK_PLUGIN=weave CLOUD_IMAGE=coreos-stable CLOUD_REGION=europe-west1-b CLUSTER_MODE=ha BOOTSTRAP_OS=coreos

before_install:
  # Install Ansible.
  - pip install --user ansible
  - pip install --user netaddr
  # W/A https://github.com/ansible/ansible-modules-core/issues/5196#issuecomment-253766186
  - pip install --user apache-libcloud==0.20.1
  - pip install --user boto==2.9.0 -U
  # Load cached docker images
  - if [ -d /var/tmp/releases ]; then find /var/tmp/releases -type f -name "*.tar" | xargs -I {} sh -c "zcat {} | docker load"; fi

cache:
  - directories:
    - $HOME/.cache/pip
    - $HOME/.local
    - /var/tmp/releases

before_script:
  - echo "RUN $TRAVIS_JOB_NUMBER $KUBE_NETWORK_PLUGIN $CONTAINER_ENGINE "
  - mkdir -p $HOME/.ssh
  - echo $PRIVATE_KEY | base64 -d > $HOME/.ssh/id_rsa
  - echo $GCE_PEM_FILE | base64 -d > $HOME/.ssh/gce
  - chmod 400 $HOME/.ssh/id_rsa
  - chmod 755 $HOME/.local/bin/ansible-playbook
  - $HOME/.local/bin/ansible-playbook --version
  - cp tests/ansible.cfg .
  - export PYPATH=$([ $BOOTSTRAP_OS = none ] && echo /usr/bin/python || echo /opt/bin/python)
  # - "echo $HOME/.local/bin/ansible-playbook -i inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root -e '{\"cloud_provider\": true}' $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN} setup-kubernetes/cluster.yml"

script:
  - >
    $HOME/.local/bin/ansible-playbook tests/cloud_playbooks/create-gce.yml -i tests/local_inventory/hosts.cfg -c local $LOG_LEVEL
    -e mode=${CLUSTER_MODE}
    -e test_id=${TEST_ID}
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
    -e gce_project_id=${GCE_PROJECT_ID}
    -e gce_service_account_email=${GCE_ACCOUNT}
    -e gce_pem_file=${HOME}/.ssh/gce
    -e cloud_image=${CLOUD_IMAGE}
    -e inventory_path=${PWD}/inventory/inventory.ini
    -e cloud_region=${CLOUD_REGION}

  # Create cluster with netchecker app deployed
  - >
    $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS
    -b --become-user=root -e cloud_provider=gce $LOG_LEVEL -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
    -e bootstrap_os=${BOOTSTRAP_OS}
    -e ansible_python_interpreter=${PYPATH}
    -e download_run_once=true
    -e download_localhost=true
    -e local_release_dir=/var/tmp/releases
    -e deploy_netchecker=true
    cluster.yml

  # Test cases
  ## Test Master API
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/010_check-apiserver.yml $LOG_LEVEL
  ## Ping between 2 pods
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/030_check-network.yml $LOG_LEVEL
  ## Advanced DNS checks
  - $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini -e ansible_python_interpreter=${PYPATH} -u $SSH_USER -e ansible_ssh_user=$SSH_USER $SSH_ARGS -b --become-user=root tests/testcases/040_check-network-adv.yml $LOG_LEVEL

after_script:
  - >
    $HOME/.local/bin/ansible-playbook -i inventory/inventory.ini tests/cloud_playbooks/delete-gce.yml -c local $LOG_LEVEL
    -e mode=${CLUSTER_MODE}
    -e test_id=${TEST_ID}
    -e kube_network_plugin=${KUBE_NETWORK_PLUGIN}
    -e gce_project_id=${GCE_PROJECT_ID}
    -e gce_service_account_email=${GCE_ACCOUNT}
    -e gce_pem_file=${HOME}/.ssh/gce
    -e cloud_image=${CLOUD_IMAGE}
    -e inventory_path=${PWD}/inventory/inventory.ini
    -e cloud_region=${CLOUD_REGION}
OWNERS, 3 insertions
@@ -4,3 +4,6 @@
 owners:
 - Smana
 - ant31
+- bogdando
+- mattymo
+- rsmitty
README.md (98 lines changed)
@@ -1,10 +1,10 @@
 (Kubernetes logo image)
 
-##Deploy a production ready kubernetes cluster
+## Deploy a production ready kubernetes cluster
 
-If you have questions, you can [invite yourself](https://slack.kubespray.io/) to **chat** with us on Slack! [](https://kubespray.slack.com)
+If you have questions, join us on the [kubernetes slack](https://slack.k8s.io), channel **#kargo**.
 
-- Can be deployed on **AWS, GCE, OpenStack or Baremetal**
+- Can be deployed on **AWS, GCE, Azure, OpenStack or Baremetal**
 - **Highly available** cluster
 - **Composable** (Choice of the network plugin for instance)
 - Support most popular **Linux distributions**
@@ -13,75 +13,103 @@ If you have questions, you can [invite yourself](https://slack.kubespray.io/) to
 
 To deploy the cluster you can use:
 
-[**kargo-cli**](https://github.com/kubespray/kargo-cli) (deprecated, a newer [go](https://github.com/Smana/kargo-cli/tree/kargogo) version soon)<br>
-**Ansible** usual commands <br>
+[**kargo-cli**](https://github.com/kubespray/kargo-cli) <br>
+**Ansible** usual commands and [**inventory builder**](https://github.com/kubernetes-incubator/kargo/blob/master/contrib/inventory_builder/inventory.py) <br>
 **vagrant** by simply running `vagrant up` (for testing purposes) <br>
 
 
 * [Requirements](#requirements)
+* [Kargo vs ...](docs/comparisons.md)
 * [Getting started](docs/getting-started.md)
+* [Ansible inventory and tags](docs/ansible.md)
+* [Deployment data variables](docs/vars.md)
+* [DNS stack](docs/dns-stack.md)
+* [HA mode](docs/ha-mode.md)
+* [Network plugins](#network-plugins)
 * [Vagrant install](docs/vagrant.md)
 * [CoreOS bootstrap](docs/coreos.md)
-* [Ansible variables](docs/ansible.md)
+* [Downloaded artifacts](docs/downloads.md)
 * [Cloud providers](docs/cloud.md)
 * [OpenStack](docs/openstack.md)
 * [AWS](docs/aws.md)
-* [Network plugins](#network-plugins)
+* [Azure](docs/azure.md)
+* [Large deployments](docs/large-deployments.md)
+* [Upgrades basics](docs/upgrades.md)
 * [Roadmap](docs/roadmap.md)
 
 Supported Linux distributions
 ===============
 
-* **CoreOS**
-* **Debian** Wheezy, Jessie
-* **Ubuntu** 14.10, 15.04, 15.10, 16.04
-* **Fedora** 23
+* **Container Linux by CoreOS**
+* **Debian** Jessie
+* **Ubuntu** 16.04
 * **CentOS/RHEL** 7
 
-Versions
---------------
-
-[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.4.0 <br>
-[etcd](https://github.com/coreos/etcd/releases) v3.0.1 <br>
-[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.20.0 <br>
-[flanneld](https://github.com/coreos/flannel/releases) v0.5.5 <br>
-[weave](http://weave.works/) v1.6.1 <br>
-[docker](https://www.docker.com/) v1.10.3 <br>
+Note: Upstart/SysV init based OS types are not supported.
+
+Versions of supported components
+--------------------------------
+
+[kubernetes](https://github.com/kubernetes/kubernetes/releases) v1.5.1 <br>
+[etcd](https://github.com/coreos/etcd/releases) v3.0.6 <br>
+[flanneld](https://github.com/coreos/flannel/releases) v0.6.2 <br>
+[calicoctl](https://github.com/projectcalico/calico-docker/releases) v0.23.0 <br>
+[canal](https://github.com/projectcalico/canal) (given calico/flannel versions) <br>
+[weave](http://weave.works/) v1.8.2 <br>
+[docker](https://www.docker.com/) v1.12.5 <br>
+[rkt](https://coreos.com/rkt/docs/latest/) v1.21.0 <br>
+
+Note: rkt support as docker alternative is limited to control plane (etcd and
+kubelet). Docker is still used for Kubernetes cluster workloads and network
+plugins' related OS services. Also note, only one of the supported network
+plugins can be deployed for a given single cluster.
 
 Requirements
 --------------
 
+* **Ansible v2.2 (or newer) and python-netaddr are installed on the machine
+  that will run Ansible commands**
+* **Jinja 2.8 (or newer) is required to run the Ansible Playbooks**
 * The target servers must have **access to the Internet** in order to pull docker images.
+* The target servers are configured to allow **IPv4 forwarding**.
+* **Your ssh key must be copied** to all the servers part of your inventory.
 * The **firewalls are not managed**, you'll need to implement your own rules the way you used to.
-in order to avoid any issue during deployment you should disable your firewall
-* **Copy your ssh keys** to all the servers part of your inventory.
-* **Ansible v2.x and python-netaddr**
+  in order to avoid any issue during deployment you should disable your firewall.
 
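To satisfy the control-machine requirements above in one step, something like the following works (a sketch, not part of the diff; the version pins come straight from the requirements list):

```shell
# Install Ansible >= 2.2, Jinja2 >= 2.8 and python-netaddr on the machine
# that will run the ansible-playbook commands.
pip install --user "ansible>=2.2" "jinja2>=2.8" netaddr
```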
 ## Network plugins
-You can choose between 3 network plugins. (default: `flannel` with vxlan backend)
+You can choose between 4 network plugins. (default: `calico`)
 
 * [**flannel**](docs/flannel.md): gre/vxlan (layer 2) networking.
 
 * [**calico**](docs/calico.md): bgp (layer 3) networking.
 
+* [**canal**](https://github.com/projectcalico/canal): a composition of calico and flannel plugins.
+
 * **weave**: Weave is a lightweight container overlay network that doesn't require an external K/V database cluster. <br>
-(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html))
+(Please refer to `weave` [troubleshooting documentation](http://docs.weave.works/weave/latest_release/troubleshooting.html)).
 
-The choice is defined with the variable `kube_network_plugin`
+The choice is defined with the variable `kube_network_plugin`. There is also an
+option to leverage built-in cloud provider networking instead.
+See also [Network checker](docs/netcheck.md).
 
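Since `kube_network_plugin` is an ordinary Ansible variable, the plugin choice can be made per run on the command line, exactly as the CI script at the top of this diff does, or pinned in `inventory/group_vars/k8s-cluster.yml` (a sketch; the inventory path is an example):

```shell
# Deploy with weave instead of the new default 'calico'.
ansible-playbook -i inventory/inventory.ini -b \
  -e kube_network_plugin=weave cluster.yml
```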
+## Community docs and resources
+ - [kubernetes.io/docs/getting-started-guides/kargo/](https://kubernetes.io/docs/getting-started-guides/kargo/)
+ - [kargo, monitoring and logging](https://github.com/gregbkr/kubernetes-kargo-logging-monitoring) by @gregbkr
+ - [Deploy Kubernetes w/ Ansible & Terraform](https://rsmitty.github.io/Terraform-Ansible-Kubernetes/) by @rsmitty
+ - [Deploy a Kubernetes Cluster with Kargo (video)](https://www.youtube.com/watch?v=N9q51JgbWu8)
+
+## Tools and projects on top of Kargo
+ - [Digital Rebar](https://github.com/digitalrebar/digitalrebar)
+ - [Kargo-cli](https://github.com/kubespray/kargo-cli)
+ - [Fuel-ccp-installer](https://github.com/openstack/fuel-ccp-installer)
+ - [Terraform Contrib](https://github.com/kubernetes-incubator/kargo/tree/master/contrib/terraform)
+
 ## CI Tests
 
-[](https://travis-ci.org/kubespray/kargo) </br>
-
-### Google Compute Engine
-
-| Calico | Flannel | Weave |
-------------- | ------------- | ------------- | ------------- |
-Ubuntu Xenial |[](https://ci.kubespray.io/job/kargo-gce-xenial-calico/)|[](https://ci.kubespray.io/job/kargo-gce-xenial-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-xenial-weave)|
-CentOS 7 |[](https://ci.kubespray.io/job/kargo-gce-centos7-calico/)|[](https://ci.kubespray.io/job/kargo-gce-centos7-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-centos7-weave/)|
-CoreOS (stable) |[](https://ci.kubespray.io/job/kargo-gce-coreos-calico/)|[](https://ci.kubespray.io/job/kargo-gce-coreos-flannel/)|[](https://ci.kubespray.io/job/kargo-gce-coreos-weave/)|
-
-CI tests sponsored by Google (GCE), and [teuto.net](https://teuto.net/) for OpenStack.
+(GitLab CI logo image)
+
+[](https://gitlab.com/kargo-ci/kubernetes-incubator__kargo/pipelines) </br>
+
+CI/end-to-end tests sponsored by Google (GCE), DigitalOcean, [teuto.net](https://teuto.net/) (openstack).
+See the [test matrix](docs/test_cases.md) for details.
RELEASE.md (31 lines changed)
@@ -7,3 +7,34 @@ The Kargo Project is released on an as-needed basis. The process is as follows:
 3. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION`
 4. The release issue is closed
 5. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kargo $VERSION is released`
+
+## Major/minor releases, merge freezes and milestones
+
+* Kargo does not maintain stable branches for releases. Releases are tags, not
+  branches, and there are no backports. Therefore, there is also no need for
+  merge freezes.
+
+* Fixes for major releases (vX.x.0) and minor releases (vX.Y.x) are delivered
+  via maintenance releases (vX.Y.Z) and assigned to the corresponding open
+  milestone (vX.Y). That milestone remains open for the major/minor release's
+  support lifetime, which ends once the milestone is closed. Only then can the
+  next major or minor release be done.
+
+* Kargo major and minor releases are bound to the given ``kube_version`` major/minor
+  version numbers and other components' arbitrary versions, like etcd or network plugins.
+  Older or newer versions are not supported and not tested for the given release.
+
+* There are no unstable releases and no APIs, thus Kargo doesn't follow
+  [semver](http://semver.org/). Every version describes only a stable release.
+  Breaking changes, if any are introduced by changed defaults or non-contrib ansible roles'
+  playbooks, shall be described in the release notes. Other breaking changes, if any in
+  the contributed addons or bound versions of Kubernetes and other components, are
+  considered out of Kargo's scope and are up to the components' teams to deal with and
+  document.
+
+* Minor releases can change components' versions, but not the major ``kube_version``.
+  A greater ``kube_version`` requires a new major or minor release. For example, if Kargo v2.0.0
+  is bound to ``kube_version: 1.4.x``, ``calico_version: 0.22.0``, ``etcd_version: v3.0.6``,
+  then Kargo v2.1.0 may be bound to only minor changes to ``kube_version``, like v1.5.1,
+  and *any* changes to other components, like etcd v4, or calico 1.2.3.
+  And Kargo v3.x.x shall be bound to ``kube_version: 2.x.x`` respectively.
Vagrantfile (vendored, 45 lines changed)
@@ -16,7 +16,14 @@ $vm_cpus = 1
 $shared_folders = {}
 $forwarded_ports = {}
 $subnet = "172.17.8"
-$box = "bento/ubuntu-14.04"
+$box = "bento/ubuntu-16.04"
+# The first three nodes are etcd servers
+$etcd_instances = $num_instances
+# The first two nodes are masters
+$kube_master_instances = $num_instances == 1 ? $num_instances : ($num_instances - 1)
+# All nodes are kube nodes
+$kube_node_instances = $num_instances
+$local_release_dir = "/vagrant/temp"
 
 host_vars = {}
 
@@ -38,6 +45,13 @@ if ! File.exist?(File.join(File.dirname($inventory), "hosts"))
   end
 end
+
+if Vagrant.has_plugin?("vagrant-proxyconf")
+  $no_proxy = ENV['NO_PROXY'] || ENV['no_proxy'] || "127.0.0.1,localhost"
+  (1..$num_instances).each do |i|
+    $no_proxy += ",#{$subnet}.#{i+100}"
+  end
+end
 
 Vagrant.configure("2") do |config|
   # always use Vagrants insecure key
   config.ssh.insert_key = false
@@ -52,6 +66,12 @@ Vagrant.configure("2") do |config|
   config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config|
     config.vm.hostname = vm_name
+
+    if Vagrant.has_plugin?("vagrant-proxyconf")
+      config.proxy.http = ENV['HTTP_PROXY'] || ENV['http_proxy'] || ""
+      config.proxy.https = ENV['HTTPS_PROXY'] || ENV['https_proxy'] || ""
+      config.proxy.no_proxy = $no_proxy
+    end
 
     if $expose_docker_tcp
       config.vm.network "forwarded_port", guest: 2375, host: ($expose_docker_tcp + i - 1), auto_correct: true
     end
@@ -75,12 +95,14 @@ Vagrant.configure("2") do |config|
 
     ip = "#{$subnet}.#{i+100}"
     host_vars[vm_name] = {
-      "ip" => ip,
-      #"access_ip" => ip,
-      "flannel_interface" => ip,
-      "flannel_backend_type" => "host-gw",
-      "local_release_dir" => "/vagrant/temp",
-      "download_run_once" => "True"
+      "ip": ip,
+      "flannel_interface": ip,
+      "flannel_backend_type": "host-gw",
+      "local_release_dir" => $local_release_dir,
+      "download_run_once": "False",
+      # Override the default 'calico' with flannel.
+      # inventory/group_vars/k8s-cluster.yml
+      "kube_network_plugin": "flannel",
     }
     config.vm.network :private_network, ip: ip
 
@@ -99,12 +121,9 @@ Vagrant.configure("2") do |config|
     ansible.host_vars = host_vars
     #ansible.tags = ['download']
     ansible.groups = {
-      # The first three nodes should be etcd servers
-      "etcd" => ["#{$instance_name_prefix}-0[1:3]"],
-      # The first two nodes should be masters
-      "kube-master" => ["#{$instance_name_prefix}-0[1:2]"],
-      # all nodes should be kube nodes
-      "kube-node" => ["#{$instance_name_prefix}-0[1:#{$num_instances}]"],
+      "etcd" => ["#{$instance_name_prefix}-0[1:#{$etcd_instances}]"],
+      "kube-master" => ["#{$instance_name_prefix}-0[1:#{$kube_master_instances}]"],
+      "kube-node" => ["#{$instance_name_prefix}-0[1:#{$kube_node_instances}]"],
       "k8s-cluster:children" => ["kube-master", "kube-node"],
     }
   end
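The proxy settings above only take effect when the vagrant-proxyconf plugin is installed; a usage sketch (the proxy URL is an example):

```shell
# Install the plugin the Vagrantfile probes for, then boot behind a proxy.
# NO_PROXY is automatically extended with each VM's private IP.
vagrant plugin install vagrant-proxyconf
HTTP_PROXY=http://proxy.example.com:3128 \
HTTPS_PROXY=http://proxy.example.com:3128 vagrant up
```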
ansible.cfg (8 lines added)
@@ -1,4 +1,12 @@
 [ssh_connection]
 pipelining=True
+#ssh_args = -F ./ssh-bastion.conf -o ControlMaster=auto -o ControlPersist=30m
+#control_path = ~/.ssh/ansible-%%r@%%h:%%p
 [defaults]
 host_key_checking=False
+gathering = smart
+fact_caching = jsonfile
+fact_caching_connection = /tmp
+stdout_callback = skippy
+library = ./library
+callback_whitelist = profile_tasks
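With `gathering = smart` and the jsonfile fact cache under /tmp, repeated runs reuse previously gathered facts; if stale facts are suspected, the cache can be bypassed (a sketch; flag availability depends on the Ansible version in use):

```shell
# Discard cached facts and regather on the next run.
ansible-playbook -i inventory/inventory.ini --flush-cache cluster.yml
```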
cluster.yml (80 lines changed)
@@ -1,36 +1,92 @@
 ---
-- hosts: all
-  gather_facts: false
+- hosts: localhost
+  gather_facts: False
   roles:
-    - bootstrap-os
-  tags:
-    - bootstrap-os
+    - { role: kargo-defaults}
+    - { role: bastion-ssh-config, tags: ["localhost", "bastion"]}
 
+- hosts: k8s-cluster:etcd:calico-rr
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  gather_facts: false
+  vars:
+    # Need to disable pipelining for bootstrap-os as some systems have requiretty in sudoers set, which makes pipelining
+    # fail. bootstrap-os fixes this on these systems, so in later plays it can be enabled.
+    ansible_ssh_pipelining: false
+  roles:
+    - { role: kargo-defaults}
+    - { role: bootstrap-os, tags: bootstrap-os}
 
-- hosts: all
+- hosts: k8s-cluster:etcd:calico-rr
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  vars:
+    ansible_ssh_pipelining: true
   gather_facts: true
 
-- hosts: etcd:!k8s-cluster
+- hosts: k8s-cluster:etcd:calico-rr
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
+    - { role: kargo-defaults}
+    - { role: kernel-upgrade, tags: kernel-upgrade, when: kernel_upgrade is defined and kernel_upgrade }
     - { role: kubernetes/preinstall, tags: preinstall }
-    - { role: etcd, tags: etcd }
+    - { role: docker, tags: docker }
+    - role: rkt
+      tags: rkt
+      when: "'rkt' in [etcd_deployment_type, kubelet_deployment_type, vault_deployment_type]"
 
+- hosts: etcd:k8s-cluster:vault
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  roles:
+    - { role: kargo-defaults, when: "cert_management == 'vault'" }
+    - { role: vault, tags: vault, vault_bootstrap: true, when: "cert_management == 'vault'" }
+
+- hosts: etcd
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  roles:
+    - { role: kargo-defaults}
+    - { role: etcd, tags: etcd, etcd_cluster_setup: true }
 
 - hosts: k8s-cluster
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kubernetes/preinstall, tags: preinstall }
-    - { role: etcd, tags: etcd }
+    - { role: kargo-defaults}
+    - { role: etcd, tags: etcd, etcd_cluster_setup: false }
+
+- hosts: etcd:k8s-cluster:vault
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  roles:
+    - { role: kargo-defaults}
+    - { role: vault, tags: vault, when: "cert_management == 'vault'"}
+
+- hosts: k8s-cluster
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  roles:
+    - { role: kargo-defaults}
     - { role: kubernetes/node, tags: node }
     - { role: network_plugin, tags: network }
 
 - hosts: kube-master
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: kubernetes/preinstall, tags: preinstall }
+    - { role: kargo-defaults}
     - { role: kubernetes/master, tags: master }
+    - { role: kubernetes-apps/network_plugin, tags: network }
+    - { role: kubernetes-apps/policy_controller, tags: policy-controller }
+
+- hosts: calico-rr
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
+  roles:
+    - { role: kargo-defaults}
+    - { role: network_plugin/calico/rr, tags: network }
 
 - hosts: k8s-cluster
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
-    - { role: dnsmasq, tags: dnsmasq }
+    - { role: kargo-defaults}
+    - { role: dnsmasq, when: "dns_mode == 'dnsmasq_kubedns'", tags: dnsmasq }
+    - { role: kubernetes/preinstall, when: "dns_mode != 'none' and resolvconf_mode == 'host_resolvconf'", tags: resolvconf }
 
 - hosts: kube-master[0]
+  any_errors_fatal: "{{ any_errors_fatal | default(true) }}"
   roles:
+    - { role: kargo-defaults}
     - { role: kubernetes-apps, tags: apps }
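The reworked host patterns (`k8s-cluster:etcd:calico-rr`, `etcd`, `kube-master[0]`) assume the inventory defines these groups; a minimal matching INI inventory might look like this (hostnames and IPs are illustrative only; `calico-rr` can simply be left undefined or empty):

```shell
# Write a three-node inventory with the groups cluster.yml targets.
cat > inventory/inventory.ini <<'EOF'
node1 ansible_ssh_host=10.10.1.3 ip=10.10.1.3
node2 ansible_ssh_host=10.10.1.4 ip=10.10.1.4
node3 ansible_ssh_host=10.10.1.5 ip=10.10.1.5

[kube-master]
node1
node2

[etcd]
node1
node2
node3

[kube-node]
node1
node2
node3

[k8s-cluster:children]
kube-node
kube-master
EOF
```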
contrib/aws_iam/kubernetes-master-policy.json (new file, 27 lines)
@@ -0,0 +1,27 @@
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["ec2:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": ["elasticloadbalancing:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": ["route53:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": [
        "arn:aws:s3:::kubernetes-*"
      ]
    }
  ]
}
contrib/aws_iam/kubernetes-master-role.json (new file, 10 lines)
@@ -0,0 +1,10 @@
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "ec2.amazonaws.com"},
      "Action": "sts:AssumeRole"
    }
  ]
}
contrib/aws_iam/kubernetes-minion-policy.json (new file, 45 lines)
@@ -0,0 +1,45 @@
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": [
        "arn:aws:s3:::kubernetes-*"
      ]
    },
    {
      "Effect": "Allow",
      "Action": "ec2:Describe*",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:AttachVolume",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:DetachVolume",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": ["route53:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": [
        "ecr:GetAuthorizationToken",
        "ecr:BatchCheckLayerAvailability",
        "ecr:GetDownloadUrlForLayer",
        "ecr:GetRepositoryPolicy",
        "ecr:DescribeRepositories",
        "ecr:ListImages",
        "ecr:BatchGetImage"
      ],
      "Resource": "*"
    }
  ]
}
contrib/aws_iam/kubernetes-minion-role.json (new file, 10 lines)
@@ -0,0 +1,10 @@
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "ec2.amazonaws.com"},
      "Action": "sts:AssumeRole"
    }
  ]
}
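A sketch of loading these four documents with the AWS CLI (role and policy names are examples; the file names are the ones added above):

```shell
# Create the master role with its trust policy, then attach the inline policy.
aws iam create-role --role-name kubernetes-master \
    --assume-role-policy-document file://kubernetes-master-role.json
aws iam put-role-policy --role-name kubernetes-master \
    --policy-name kubernetes-master \
    --policy-document file://kubernetes-master-policy.json

# Same pattern for the minion role.
aws iam create-role --role-name kubernetes-minion \
    --assume-role-policy-document file://kubernetes-minion-role.json
aws iam put-role-policy --role-name kubernetes-minion \
    --policy-name kubernetes-minion \
    --policy-document file://kubernetes-minion-policy.json
```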
contrib/azurerm/.gitignore (vendored, new file, 2 lines)
@@ -0,0 +1,2 @@
.generated
/inventory
contrib/azurerm/README.md (new file, 64 lines)
@@ -0,0 +1,64 @@
# Kubernetes on Azure with Azure Resource Group Templates

Provision the base infrastructure for a Kubernetes cluster by using [Azure Resource Group Templates](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-authoring-templates)

## Status

This will provision the base infrastructure (vnet, vms, nics, ips, ...) needed for Kubernetes in Azure into the specified
Resource Group. It will not install Kubernetes itself; this has to be done in a later step by yourself (using kargo, of course).

## Requirements

- [Install azure-cli](https://docs.microsoft.com/en-us/azure/xplat-cli-install)
- [Login with azure-cli](https://docs.microsoft.com/en-us/azure/xplat-cli-connect)
- Dedicated Resource Group created in the Azure Portal or through azure-cli

## Configuration through group_vars/all

You have to modify at least one variable in group_vars/all, which is the **cluster_name** variable. It must be globally
unique due to some restrictions in Azure. Most other variables should be self-explanatory if you have some basic Kubernetes
experience.

## Bastion host

You can enable the use of a Bastion Host by changing **use_bastion** in group_vars/all to **true**. The generated
templates will then include an additional bastion VM which can then be used to connect to the masters and nodes. The option
also removes all public IPs from all other VMs.

## Generating and applying

To generate and apply the templates, call:

```shell
$ ./apply-rg.sh <resource_group_name>
```

If you change something in the configuration (e.g. number of nodes) later, you can call this again and Azure will
take care of creating/modifying whatever is needed.

## Clearing a resource group

If you need to delete all resources from a resource group, simply call:

```shell
$ ./clear-rg.sh <resource_group_name>
```

**WARNING** this really deletes everything from your resource group, including everything that was later created by you!

## Generating an inventory for kargo

After you have applied the templates, you can generate an inventory with this call:

```shell
$ ./generate-inventory.sh <resource_group_name>
```

It will create the file ./inventory, which can then be used with kargo, e.g.:

```shell
$ cd kargo-root-dir
$ ansible-playbook -i contrib/azurerm/inventory -u devops --become -e "@inventory/group_vars/all.yml" cluster.yml
```
contrib/azurerm/apply-rg.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
#!/usr/bin/env bash

set -e

AZURE_RESOURCE_GROUP="$1"

if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
  echo "AZURE_RESOURCE_GROUP is missing"
  exit 1
fi

ansible-playbook generate-templates.yml

azure group deployment create -f ./.generated/network.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/storage.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/availability-sets.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/bastion.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/masters.json -g $AZURE_RESOURCE_GROUP
azure group deployment create -f ./.generated/minions.json -g $AZURE_RESOURCE_GROUP
contrib/azurerm/clear-rg.sh (new executable file, 14 lines)
@@ -0,0 +1,14 @@
#!/usr/bin/env bash

set -e

AZURE_RESOURCE_GROUP="$1"

if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
  echo "AZURE_RESOURCE_GROUP is missing"
  exit 1
fi

ansible-playbook generate-templates.yml

azure group deployment create -g "$AZURE_RESOURCE_GROUP" -f ./.generated/clear-rg.json -m Complete
contrib/azurerm/generate-inventory.sh (new executable file, 12 lines)
@@ -0,0 +1,12 @@
#!/usr/bin/env bash

set -e

AZURE_RESOURCE_GROUP="$1"

if [ "$AZURE_RESOURCE_GROUP" == "" ]; then
  echo "AZURE_RESOURCE_GROUP is missing"
  exit 1
fi

ansible-playbook generate-inventory.yml -e azure_resource_group="$AZURE_RESOURCE_GROUP"
contrib/azurerm/generate-inventory.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
---
- hosts: localhost
  gather_facts: False
  roles:
    - generate-inventory
contrib/azurerm/generate-templates.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
---
- hosts: localhost
  gather_facts: False
  roles:
    - generate-templates
contrib/azurerm/group_vars/all (new file, 26 lines)
@@ -0,0 +1,26 @@

# Due to some Azure limitations, this name must be globally unique
cluster_name: example

# Set this to true if you do not want to have public IPs for your masters and minions. This will provision a bastion
# node that can be used to access the masters and minions
use_bastion: false

number_of_k8s_masters: 3
number_of_k8s_nodes: 3

masters_vm_size: Standard_A2
masters_os_disk_size: 1000

minions_vm_size: Standard_A2
minions_os_disk_size: 1000

admin_username: devops
admin_password: changeme

ssh_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLRzcxbsFDdEibiyXCSdIFh7bKbXso1NqlKjEyPTptf3aBXHEhVil0lJRjGpTlpfTy7PHvXFbXIOCdv9tOmeH1uxWDDeZawgPFV6VSZ1QneCL+8bxzhjiCn8133wBSPZkN8rbFKd9eEUUBfx8ipCblYblF9FcidylwtMt5TeEmXk8yRVkPiCuEYuDplhc2H0f4PsK3pFb5aDVdaDT3VeIypnOQZZoUxHWqm6ThyHrzLJd3SrZf+RROFWW1uInIDf/SZlXojczUYoffxgT1lERfOJCHJXsqbZWugbxQBwqsVsX59+KPxFFo6nV88h3UQr63wbFx52/MXkX4WrCkAHzN ablock-vwfs@dell-lappy"

# Azure CIDRs
azure_vnet_cidr: 10.0.0.0/8
azure_admin_cidr: 10.241.2.0/24
azure_masters_cidr: 10.0.4.0/24
azure_minions_cidr: 10.240.0.0/16
contrib/azurerm/roles/generate-inventory/tasks/main.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
---

- name: Query Azure VMs
  command: azure vm list-ip-address --json {{ azure_resource_group }}
  register: vm_list_cmd

- set_fact:
    vm_list: "{{ vm_list_cmd.stdout }}"

- name: Generate inventory
  template: src=inventory.j2 dest="{{playbook_dir}}/inventory"
contrib/azurerm/roles/generate-inventory/templates/inventory.j2 (new file, 33 lines)
@@ -0,0 +1,33 @@

{% for vm in vm_list %}
{% if not use_bastion or vm.name == 'bastion' %}
{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].publicIPAddress.expanded.ipAddress }} ip={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }}
{% else %}
{{ vm.name }} ansible_ssh_host={{ vm.networkProfile.networkInterfaces[0].expanded.ipConfigurations[0].privateIPAddress }}
{% endif %}
{% endfor %}

[kube-master]
{% for vm in vm_list %}
{% if 'kube-master' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

[etcd]
{% for vm in vm_list %}
{% if 'etcd' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

[kube-node]
{% for vm in vm_list %}
{% if 'kube-node' in vm.tags.roles %}
{{ vm.name }}
{% endif %}
{% endfor %}

[k8s-cluster:children]
kube-node
kube-master
contrib/azurerm/roles/generate-templates/defaults/main.yml (new file, 37 lines)
@@ -0,0 +1,37 @@
apiVersion: "2015-06-15"

virtualNetworkName: "KubVNET"

subnetAdminName: "ad-subnet"
subnetMastersName: "master-subnet"
subnetMinionsName: "minion-subnet"

routeTableName: "routetable"
securityGroupName: "secgroup"

nameSuffix: "{{cluster_name}}"

availabilitySetMasters: "master-avs"
availabilitySetMinions: "minion-avs"

faultDomainCount: 3
updateDomainCount: 10

bastionVmSize: Standard_A0
bastionVMName: bastion
bastionIPAddressName: bastion-pubip

disablePasswordAuthentication: true

sshKeyPath: "/home/{{admin_username}}/.ssh/authorized_keys"

imageReference:
  publisher: "OpenLogic"
  offer: "CentOS"
  sku: "7.2"
  version: "latest"
imageReferenceJson: "{{imageReference|to_json}}"

storageAccountName: "sa{{nameSuffix | replace('-', '')}}"
storageAccountType: "Standard_LRS"
contrib/azurerm/roles/generate-templates/tasks/main.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
- set_fact:
    base_dir: "{{playbook_dir}}/.generated/"

- file: path={{base_dir}} state=directory recurse=true

- template: src={{item}} dest="{{base_dir}}/{{item}}"
  with_items:
    - network.json
    - storage.json
    - availability-sets.json
    - bastion.json
    - masters.json
    - minions.json
    - clear-rg.json
contrib/azurerm/roles/generate-templates/templates/availability-sets.json (new file, 30 lines)
@@ -0,0 +1,30 @@
{
  "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {
  },
  "variables": {
  },
  "resources": [
    {
      "type": "Microsoft.Compute/availabilitySets",
      "name": "{{availabilitySetMasters}}",
      "apiVersion": "{{apiVersion}}",
      "location": "[resourceGroup().location]",
      "properties": {
        "PlatformFaultDomainCount": "{{faultDomainCount}}",
        "PlatformUpdateDomainCount": "{{updateDomainCount}}"
      }
    },
    {
      "type": "Microsoft.Compute/availabilitySets",
      "name": "{{availabilitySetMinions}}",
      "apiVersion": "{{apiVersion}}",
      "location": "[resourceGroup().location]",
      "properties": {
        "PlatformFaultDomainCount": "{{faultDomainCount}}",
        "PlatformUpdateDomainCount": "{{updateDomainCount}}"
      }
    }
  ]
}
contrib/azurerm/roles/generate-templates/templates/bastion.json (new file, 99 lines)
@@ -0,0 +1,99 @@
{
  "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {
  },
  "variables": {
    "vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]",
    "subnetAdminRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetAdminName}}')]"
  },
  "resources": [
    {% if use_bastion %}
    {
      "apiVersion": "{{apiVersion}}",
      "type": "Microsoft.Network/publicIPAddresses",
      "name": "{{bastionIPAddressName}}",
      "location": "[resourceGroup().location]",
      "properties": {
        "publicIPAllocationMethod": "Static"
      }
    },
    {
      "apiVersion": "{{apiVersion}}",
      "type": "Microsoft.Network/networkInterfaces",
      "name": "{{bastionVMName}}-nic",
      "location": "[resourceGroup().location]",
      "dependsOn": [
        "[concat('Microsoft.Network/publicIPAddresses/', '{{bastionIPAddressName}}')]"
      ],
      "properties": {
        "ipConfigurations": [
          {
            "name": "BastionIpConfig",
            "properties": {
              "privateIPAllocationMethod": "Dynamic",
              "publicIPAddress": {
                "id": "[resourceId('Microsoft.Network/publicIPAddresses', '{{bastionIPAddressName}}')]"
              },
              "subnet": {
                "id": "[variables('subnetAdminRef')]"
              }
            }
          }
        ]
      }
    },
    {
      "apiVersion": "{{apiVersion}}",
      "type": "Microsoft.Compute/virtualMachines",
      "name": "{{bastionVMName}}",
      "location": "[resourceGroup().location]",
      "dependsOn": [
        "[concat('Microsoft.Network/networkInterfaces/', '{{bastionVMName}}-nic')]"
      ],
      "tags": {
        "roles": "bastion"
      },
      "properties": {
        "hardwareProfile": {
          "vmSize": "{{bastionVmSize}}"
        },
        "osProfile": {
          "computerName": "{{bastionVMName}}",
          "adminUsername": "{{admin_username}}",
          "adminPassword": "{{admin_password}}",
          "linuxConfiguration": {
            "disablePasswordAuthentication": "true",
            "ssh": {
              "publicKeys": [
                {
                  "path": "{{sshKeyPath}}",
                  "keyData": "{{ssh_public_key}}"
                }
              ]
            }
          }
        },
        "storageProfile": {
          "imageReference": {{imageReferenceJson}},
          "osDisk": {
            "name": "osdisk",
            "vhd": {
              "uri": "[concat('http://', '{{storageAccountName}}', '.blob.core.windows.net/vhds/', '{{bastionVMName}}', '-osdisk.vhd')]"
            },
            "caching": "ReadWrite",
            "createOption": "FromImage"
          }
        },
        "networkProfile": {
          "networkInterfaces": [
            {
              "id": "[resourceId('Microsoft.Network/networkInterfaces', '{{bastionVMName}}-nic')]"
            }
          ]
        }
      }
    }
    {% endif %}
  ]
}
contrib/azurerm/roles/generate-templates/templates/clear-rg.json (new file, 8 lines)
@@ -0,0 +1,8 @@
{
  "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {},
  "variables": {},
  "resources": [],
  "outputs": {}
}
contrib/azurerm/roles/generate-templates/templates/masters.json (new file, 196 lines)
@@ -0,0 +1,196 @@
{
  "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {
  },
  "variables": {
    "lbDomainName": "{{nameSuffix}}-api",
    "lbPublicIPAddressName": "kubernetes-api-pubip",
    "lbPublicIPAddressType": "Static",
    "lbPublicIPAddressID": "[resourceId('Microsoft.Network/publicIPAddresses',variables('lbPublicIPAddressName'))]",
    "lbName": "kubernetes-api",
    "lbID": "[resourceId('Microsoft.Network/loadBalancers',variables('lbName'))]",

    "vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]",
    "kubeMastersSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMastersName}}')]"
  },
  "resources": [
    {
      "apiVersion": "{{apiVersion}}",
      "type": "Microsoft.Network/publicIPAddresses",
      "name": "[variables('lbPublicIPAddressName')]",
      "location": "[resourceGroup().location]",
      "properties": {
        "publicIPAllocationMethod": "[variables('lbPublicIPAddressType')]",
        "dnsSettings": {
          "domainNameLabel": "[variables('lbDomainName')]"
        }
      }
    },
    {
      "apiVersion": "{{apiVersion}}",
      "name": "[variables('lbName')]",
      "type": "Microsoft.Network/loadBalancers",
      "location": "[resourceGroup().location]",
      "dependsOn": [
        "[concat('Microsoft.Network/publicIPAddresses/', variables('lbPublicIPAddressName'))]"
      ],
      "properties": {
        "frontendIPConfigurations": [
          {
            "name": "kube-api-frontend",
            "properties": {
              "publicIPAddress": {
                "id": "[variables('lbPublicIPAddressID')]"
              }
            }
          }
        ],
        "backendAddressPools": [
          {
            "name": "kube-api-backend"
          }
        ],
        "loadBalancingRules": [
          {
            "name": "kube-api",
            "properties": {
              "frontendIPConfiguration": {
                "id": "[concat(variables('lbID'), '/frontendIPConfigurations/kube-api-frontend')]"
              },
              "backendAddressPool": {
                "id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
              },
              "protocol": "tcp",
              "frontendPort": 443,
              "backendPort": 443,
              "enableFloatingIP": false,
              "idleTimeoutInMinutes": 5,
              "probe": {
                "id": "[concat(variables('lbID'), '/probes/kube-api')]"
              }
            }
          }
        ],
        "probes": [
          {
            "name": "kube-api",
            "properties": {
              "protocol": "tcp",
              "port": 443,
              "intervalInSeconds": 5,
              "numberOfProbes": 2
            }
          }
        ]
      }
    },
    {% for i in range(number_of_k8s_masters) %}
    {% if not use_bastion %}
    {
      "apiVersion": "{{apiVersion}}",
      "type": "Microsoft.Network/publicIPAddresses",
      "name": "master-{{i}}-pubip",
      "location": "[resourceGroup().location]",
      "properties": {
        "publicIPAllocationMethod": "Static"
      }
    },
    {% endif %}
    {
      "apiVersion": "{{apiVersion}}",
      "type": "Microsoft.Network/networkInterfaces",
      "name": "master-{{i}}-nic",
      "location": "[resourceGroup().location]",
      "dependsOn": [
        {% if not use_bastion %}
        "[concat('Microsoft.Network/publicIPAddresses/', 'master-{{i}}-pubip')]",
        {% endif %}
        "[concat('Microsoft.Network/loadBalancers/', variables('lbName'))]"
      ],
      "properties": {
        "ipConfigurations": [
          {
            "name": "MastersIpConfig",
            "properties": {
              "privateIPAllocationMethod": "Dynamic",
              {% if not use_bastion %}
              "publicIPAddress": {
                "id": "[resourceId('Microsoft.Network/publicIPAddresses', 'master-{{i}}-pubip')]"
              },
              {% endif %}
              "subnet": {
                "id": "[variables('kubeMastersSubnetRef')]"
              },
              "loadBalancerBackendAddressPools": [
                {
                  "id": "[concat(variables('lbID'), '/backendAddressPools/kube-api-backend')]"
                }
              ]
            }
          }
        ],
        "networkSecurityGroup": {
          "id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]"
        },
        "enableIPForwarding": true
      }
    },
    {
      "type": "Microsoft.Compute/virtualMachines",
      "name": "master-{{i}}",
      "location": "[resourceGroup().location]",
      "dependsOn": [
        "[concat('Microsoft.Network/networkInterfaces/', 'master-{{i}}-nic')]"
      ],
      "tags": {
        "roles": "kube-master,etcd"
      },
      "apiVersion": "{{apiVersion}}",
      "properties": {
        "availabilitySet": {
          "id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMasters}}')]"
        },
        "hardwareProfile": {
          "vmSize": "{{masters_vm_size}}"
        },
        "osProfile": {
          "computerName": "master-{{i}}",
          "adminUsername": "{{admin_username}}",
          "adminPassword": "{{admin_password}}",
          "linuxConfiguration": {
            "disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
            "ssh": {
              "publicKeys": [
                {
                  "path": "{{sshKeyPath}}",
                  "keyData": "{{ssh_public_key}}"
                }
              ]
            }
          }
        },
        "storageProfile": {
          "imageReference": {{imageReferenceJson}},
          "osDisk": {
            "name": "ma{{nameSuffix}}{{i}}",
            "vhd": {
              "uri": "[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/master-{{i}}.vhd')]"
            },
            "caching": "ReadWrite",
            "createOption": "FromImage",
            "diskSizeGB": "{{masters_os_disk_size}}"
          }
        },
        "networkProfile": {
          "networkInterfaces": [
            {
              "id": "[resourceId('Microsoft.Network/networkInterfaces', 'master-{{i}}-nic')]"
            }
          ]
        }
      }
    } {% if not loop.last %},{% endif %}
    {% endfor %}
  ]
}
contrib/azurerm/roles/generate-templates/templates/minions.json (new file, 113 lines)
@@ -0,0 +1,113 @@
{
  "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {
  },
  "variables": {
    "vnetID": "[resourceId('Microsoft.Network/virtualNetworks', '{{virtualNetworkName}}')]",
    "kubeMinionsSubnetRef": "[concat(variables('vnetID'),'/subnets/', '{{subnetMinionsName}}')]"
  },
  "resources": [
    {% for i in range(number_of_k8s_nodes) %}
    {% if not use_bastion %}
    {
      "apiVersion": "{{apiVersion}}",
      "type": "Microsoft.Network/publicIPAddresses",
      "name": "minion-{{i}}-pubip",
      "location": "[resourceGroup().location]",
      "properties": {
        "publicIPAllocationMethod": "Static"
      }
    },
    {% endif %}
    {
      "apiVersion": "{{apiVersion}}",
      "type": "Microsoft.Network/networkInterfaces",
      "name": "minion-{{i}}-nic",
      "location": "[resourceGroup().location]",
      "dependsOn": [
        {% if not use_bastion %}
        "[concat('Microsoft.Network/publicIPAddresses/', 'minion-{{i}}-pubip')]"
        {% endif %}
      ],
      "properties": {
        "ipConfigurations": [
          {
            "name": "MinionsIpConfig",
            "properties": {
              "privateIPAllocationMethod": "Dynamic",
              {% if not use_bastion %}
              "publicIPAddress": {
                "id": "[resourceId('Microsoft.Network/publicIPAddresses', 'minion-{{i}}-pubip')]"
              },
              {% endif %}
              "subnet": {
                "id": "[variables('kubeMinionsSubnetRef')]"
              }
            }
          }
        ],
        "networkSecurityGroup": {
          "id": "[resourceId('Microsoft.Network/networkSecurityGroups', '{{securityGroupName}}')]"
        },
        "enableIPForwarding": true
      }
    },
    {
      "type": "Microsoft.Compute/virtualMachines",
      "name": "minion-{{i}}",
      "location": "[resourceGroup().location]",
      "dependsOn": [
        "[concat('Microsoft.Network/networkInterfaces/', 'minion-{{i}}-nic')]"
      ],
      "tags": {
        "roles": "kube-node"
      },
      "apiVersion": "{{apiVersion}}",
      "properties": {
        "availabilitySet": {
          "id": "[resourceId('Microsoft.Compute/availabilitySets', '{{availabilitySetMinions}}')]"
        },
        "hardwareProfile": {
          "vmSize": "{{minions_vm_size}}"
        },
        "osProfile": {
          "computerName": "minion-{{i}}",
          "adminUsername": "{{admin_username}}",
          "adminPassword": "{{admin_password}}",
          "linuxConfiguration": {
            "disablePasswordAuthentication": "{{disablePasswordAuthentication}}",
            "ssh": {
              "publicKeys": [
                {
                  "path": "{{sshKeyPath}}",
                  "keyData": "{{ssh_public_key}}"
                }
              ]
            }
          }
        },
        "storageProfile": {
          "imageReference": {{imageReferenceJson}},
          "osDisk": {
            "name": "mi{{nameSuffix}}{{i}}",
            "vhd": {
              "uri": "[concat('http://','{{storageAccountName}}','.blob.core.windows.net/vhds/minion-{{i}}.vhd')]"
            },
            "caching": "ReadWrite",
            "createOption": "FromImage",
            "diskSizeGB": "{{minions_os_disk_size}}"
          }
        },
        "networkProfile": {
          "networkInterfaces": [
            {
              "id": "[resourceId('Microsoft.Network/networkInterfaces', 'minion-{{i}}-nic')]"
            }
          ]
        }
      }
    } {% if not loop.last %},{% endif %}
    {% endfor %}
  ]
}
contrib/azurerm/roles/generate-templates/templates/network.json (new file, 109 lines)
@@ -0,0 +1,109 @@
{
  "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {
  },
  "variables": {
  },
  "resources": [
    {
      "apiVersion": "{{apiVersion}}",
      "type": "Microsoft.Network/routeTables",
      "name": "{{routeTableName}}",
      "location": "[resourceGroup().location]",
      "properties": {
        "routes": [
        ]
      }
    },
    {
      "type": "Microsoft.Network/virtualNetworks",
      "name": "{{virtualNetworkName}}",
      "location": "[resourceGroup().location]",
      "apiVersion": "{{apiVersion}}",
      "dependsOn": [
        "[concat('Microsoft.Network/routeTables/', '{{routeTableName}}')]"
      ],
      "properties": {
        "addressSpace": {
          "addressPrefixes": [
            "{{azure_vnet_cidr}}"
          ]
        },
        "subnets": [
          {
            "name": "{{subnetMastersName}}",
            "properties": {
              "addressPrefix": "{{azure_masters_cidr}}",
              "routeTable": {
                "id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]"
              }
            }
          },
          {
            "name": "{{subnetMinionsName}}",
            "properties": {
              "addressPrefix": "{{azure_minions_cidr}}",
              "routeTable": {
                "id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]"
              }
            }
          }
          {% if use_bastion %}
          ,{
            "name": "{{subnetAdminName}}",
            "properties": {
              "addressPrefix": "{{azure_admin_cidr}}",
              "routeTable": {
                "id": "[resourceId('Microsoft.Network/routeTables', '{{routeTableName}}')]"
              }
            }
          }
          {% endif %}
        ]
      }
    },
    {
      "apiVersion": "{{apiVersion}}",
      "type": "Microsoft.Network/networkSecurityGroups",
      "name": "{{securityGroupName}}",
      "location": "[resourceGroup().location]",
      "properties": {
        "securityRules": [
          {% if not use_bastion %}
          {
            "name": "ssh",
            "properties": {
              "description": "Allow SSH",
              "protocol": "Tcp",
              "sourcePortRange": "*",
              "destinationPortRange": "22",
              "sourceAddressPrefix": "Internet",
              "destinationAddressPrefix": "*",
              "access": "Allow",
              "priority": 100,
              "direction": "Inbound"
            }
          },
          {% endif %}
          {
            "name": "kube-api",
            "properties": {
              "description": "Allow secure kube-api",
              "protocol": "Tcp",
              "sourcePortRange": "*",
              "destinationPortRange": "443",
              "sourceAddressPrefix": "Internet",
              "destinationAddressPrefix": "*",
              "access": "Allow",
              "priority": 101,
              "direction": "Inbound"
            }
          }
        ]
      },
      "resources": [],
      "dependsOn": []
    }
  ]
}
||||||
@@ -0,0 +1,19 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
|
||||||
|
"contentVersion": "1.0.0.0",
|
||||||
|
"parameters": {
|
||||||
|
},
|
||||||
|
"variables": {
|
||||||
|
},
|
||||||
|
"resources": [
|
||||||
|
{
|
||||||
|
"type": "Microsoft.Storage/storageAccounts",
|
||||||
|
"name": "{{storageAccountName}}",
|
||||||
|
"location": "[resourceGroup().location]",
|
||||||
|
"apiVersion": "{{apiVersion}}",
|
||||||
|
"properties": {
|
||||||
|
"accountType": "{{storageAccountType}}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
343
contrib/inventory_builder/inventory.py
Normal file
343
contrib/inventory_builder/inventory.py
Normal file
@@ -0,0 +1,343 @@
#!/usr/bin/python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Usage: inventory.py ip1 [ip2 ...]
# Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5
#
# Advanced usage:
# Add another host after initial creation: inventory.py 10.10.1.5
# Delete a host: inventory.py -10.10.1.3
# Delete a host by id: inventory.py -node1
#
# Load a YAML or JSON file with inventory data: inventory.py load hosts.yaml
# YAML file should be in the following format:
# group1:
#   host1:
#     ip: X.X.X.X
#     var: val
# group2:
#   host2:
#     ip: X.X.X.X

from collections import OrderedDict
try:
    import configparser
except ImportError:
    import ConfigParser as configparser

import os
import re
import sys

ROLES = ['all', 'kube-master', 'kube-node', 'etcd', 'k8s-cluster:children',
         'calico-rr']
PROTECTED_NAMES = ROLES
AVAILABLE_COMMANDS = ['help', 'print_cfg', 'print_ips', 'load']
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
                   '0': False, 'no': False, 'false': False, 'off': False}


def get_var_as_bool(name, default):
    value = os.environ.get(name, '')
    return _boolean_states.get(value.lower(), default)

# Configurable as shell vars start

CONFIG_FILE = os.environ.get("CONFIG_FILE", "./inventory.cfg")
# Reconfigures cluster distribution at scale
SCALE_THRESHOLD = int(os.environ.get("SCALE_THRESHOLD", 50))
# Fixed: this previously read the SCALE_THRESHOLD env var, which made the
# massive-scale threshold impossible to configure independently.
MASSIVE_SCALE_THRESHOLD = int(os.environ.get("MASSIVE_SCALE_THRESHOLD", 200))

DEBUG = get_var_as_bool("DEBUG", True)
HOST_PREFIX = os.environ.get("HOST_PREFIX", "node")

# Configurable as shell vars end


class KargoInventory(object):

    def __init__(self, changed_hosts=None, config_file=None):
        self.config = configparser.ConfigParser(allow_no_value=True,
                                                delimiters=('\t', ' '))
        self.config_file = config_file
        if self.config_file:
            self.config.read(self.config_file)

        if changed_hosts and changed_hosts[0] in AVAILABLE_COMMANDS:
            self.parse_command(changed_hosts[0], changed_hosts[1:])
            sys.exit(0)

        self.ensure_required_groups(ROLES)

        if changed_hosts:
            self.hosts = self.build_hostnames(changed_hosts)
            self.purge_invalid_hosts(self.hosts.keys(), PROTECTED_NAMES)
            self.set_all(self.hosts)
            self.set_k8s_cluster()
            self.set_etcd(list(self.hosts.keys())[:3])
            if len(self.hosts) >= SCALE_THRESHOLD:
                self.set_kube_master(list(self.hosts.keys())[3:5])
            else:
                self.set_kube_master(list(self.hosts.keys())[:2])
            self.set_kube_node(self.hosts.keys())
            if len(self.hosts) >= SCALE_THRESHOLD:
                self.set_calico_rr(list(self.hosts.keys())[:3])
        else:  # Show help if no options
            self.show_help()
            sys.exit(0)

        self.write_config(self.config_file)

    def write_config(self, config_file):
        if config_file:
            with open(config_file, 'w') as f:
                self.config.write(f)
        else:
            print("WARNING: Unable to save config. Make sure you set "
                  "CONFIG_FILE env var.")

    def debug(self, msg):
        if DEBUG:
            print("DEBUG: {0}".format(msg))

    def get_ip_from_opts(self, optstring):
        opts = optstring.split(' ')
        for opt in opts:
            if '=' not in opt:
                continue
            k, v = opt.split('=')
            if k == "ip":
                return v
        raise ValueError("IP parameter not found in options")

    def ensure_required_groups(self, groups):
        for group in groups:
            try:
                self.debug("Adding group {0}".format(group))
                self.config.add_section(group)
            except configparser.DuplicateSectionError:
                pass

    def get_host_id(self, host):
        '''Returns integer host ID (without padding) from a given hostname.'''
        try:
            short_hostname = host.split('.')[0]
            # Raw string avoids the invalid "\d" escape-sequence warning.
            return int(re.findall(r"\d+$", short_hostname)[-1])
        except IndexError:
            raise ValueError("Host name must end in an integer")
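
    # Illustrative examples, matching tests/test_inventory.py:
    #   get_host_id('node99')           -> 99
    #   get_host_id('node3.xyz123.aaa') -> 3
    #   get_host_id('node')             raises ValueError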

    def build_hostnames(self, changed_hosts):
        existing_hosts = OrderedDict()
        highest_host_id = 0
        try:
            for host, opts in self.config.items('all'):
                existing_hosts[host] = opts
                host_id = self.get_host_id(host)
                if host_id > highest_host_id:
                    highest_host_id = host_id
        except configparser.NoSectionError:
            pass

        # FIXME(mattymo): Fix condition where delete then add reuses highest id
        next_host_id = highest_host_id + 1

        all_hosts = existing_hosts.copy()
        for host in changed_hosts:
            if host[0] == "-":
                realhost = host[1:]
                if self.exists_hostname(all_hosts, realhost):
                    self.debug("Marked {0} for deletion.".format(realhost))
                    all_hosts.pop(realhost)
                elif self.exists_ip(all_hosts, realhost):
                    self.debug("Marked {0} for deletion.".format(realhost))
                    self.delete_host_by_ip(all_hosts, realhost)
            elif host[0].isdigit():
                if self.exists_hostname(all_hosts, host):
                    self.debug("Skipping existing host {0}.".format(host))
                    continue
                elif self.exists_ip(all_hosts, host):
                    self.debug("Skipping existing host {0}.".format(host))
                    continue

                next_host = "{0}{1}".format(HOST_PREFIX, next_host_id)
                next_host_id += 1
                all_hosts[next_host] = "ansible_host={0} ip={1}".format(
                    host, host)
            elif host[0].isalpha():
                raise Exception("Adding hosts by hostname is not supported.")

        return all_hosts

    def exists_hostname(self, existing_hosts, hostname):
        return hostname in existing_hosts.keys()

    def exists_ip(self, existing_hosts, ip):
        for host_opts in existing_hosts.values():
            if ip == self.get_ip_from_opts(host_opts):
                return True
        return False

    def delete_host_by_ip(self, existing_hosts, ip):
        for hostname, host_opts in existing_hosts.items():
            if ip == self.get_ip_from_opts(host_opts):
                del existing_hosts[hostname]
                return
        raise ValueError("Unable to find host by IP: {0}".format(ip))

    def purge_invalid_hosts(self, hostnames, protected_names=[]):
        for role in self.config.sections():
            for host, _ in self.config.items(role):
                if host not in hostnames and host not in protected_names:
                    self.debug("Host {0} removed from role {1}".format(host,
                               role))
                    self.config.remove_option(role, host)

    def add_host_to_group(self, group, host, opts=""):
        self.debug("adding host {0} to group {1}".format(host, group))
        self.config.set(group, host, opts)

    def set_kube_master(self, hosts):
        for host in hosts:
            self.add_host_to_group('kube-master', host)

    def set_all(self, hosts):
        for host, opts in hosts.items():
            self.add_host_to_group('all', host, opts)

    def set_k8s_cluster(self):
        self.add_host_to_group('k8s-cluster:children', 'kube-node')
        self.add_host_to_group('k8s-cluster:children', 'kube-master')

    def set_calico_rr(self, hosts):
        for host in hosts:
            # Fixed: has_option is the correct membership test; comparing
            # the host against config.items() tuples could never match.
            if self.config.has_option('kube-master', host):
                self.debug("Not adding {0} to calico-rr group because it "
                           "conflicts with kube-master group".format(host))
                continue
            if self.config.has_option('kube-node', host):
                self.debug("Not adding {0} to calico-rr group because it "
                           "conflicts with kube-node group".format(host))
                continue
            self.add_host_to_group('calico-rr', host)

    def set_kube_node(self, hosts):
        for host in hosts:
            if len(self.config['all']) >= SCALE_THRESHOLD:
                if self.config.has_option('etcd', host):
                    self.debug("Not adding {0} to kube-node group because of "
                               "scale deployment and host is in etcd "
                               "group.".format(host))
                    continue
            if len(self.config['all']) >= MASSIVE_SCALE_THRESHOLD:
                if self.config.has_option('kube-master', host):
                    self.debug("Not adding {0} to kube-node group because of "
                               "scale deployment and host is in kube-master "
                               "group.".format(host))
                    continue
            self.add_host_to_group('kube-node', host)

    def set_etcd(self, hosts):
        for host in hosts:
            self.add_host_to_group('etcd', host)

    def load_file(self, files=None):
        '''Directly loads JSON, or YAML file to inventory.'''

        if not files:
            raise Exception("No input file specified.")

        import json
        import yaml

        for filename in list(files):
            # Try JSON, then YAML
            try:
                with open(filename, 'r') as f:
                    data = json.load(f)
            except ValueError:
                try:
                    with open(filename, 'r') as f:
                        data = yaml.load(f)
                    print("yaml")
                except yaml.YAMLError:
                    # Fixed: yaml.load raises YAMLError (not ValueError) on
                    # bad input, and the message is now actually formatted
                    # with the filename.
                    raise Exception("Cannot read {0} as JSON, YAML, "
                                    "or CSV".format(filename))

            self.ensure_required_groups(ROLES)
            self.set_k8s_cluster()
            for group, hosts in data.items():
                self.ensure_required_groups([group])
                for host, opts in hosts.items():
                    optstring = "ansible_host={0} ip={0}".format(opts['ip'])
                    for key, val in opts.items():
                        if key == "ip":
                            continue
                        optstring += " {0}={1}".format(key, val)

                    self.add_host_to_group('all', host, optstring)
                    self.add_host_to_group(group, host)
            self.write_config(self.config_file)

    def parse_command(self, command, args=None):
        if command == 'help':
            self.show_help()
        elif command == 'print_cfg':
            self.print_config()
        elif command == 'print_ips':
            self.print_ips()
        elif command == 'load':
            self.load_file(args)
        else:
            raise Exception("Invalid command specified.")

    def show_help(self):
        help_text = '''Usage: inventory.py ip1 [ip2 ...]
Examples: inventory.py 10.10.1.3 10.10.1.4 10.10.1.5

Available commands:
help - Display this message
print_cfg - Write inventory file to stdout
print_ips - Write a space-delimited list of IPs from "all" group

Advanced usage:
Add another host after initial creation: inventory.py 10.10.1.5
Delete a host: inventory.py -10.10.1.3
Delete a host by id: inventory.py -node1

Configurable env vars:
DEBUG                   Enable debug printing. Default: True
CONFIG_FILE             File to write config to. Default: ./inventory.cfg
HOST_PREFIX             Host prefix for generated hosts. Default: node
SCALE_THRESHOLD         Separate ETCD role if # of nodes >= 50
MASSIVE_SCALE_THRESHOLD Separate K8s master and ETCD if # of nodes >= 200
'''
        print(help_text)

    def print_config(self):
        self.config.write(sys.stdout)

    def print_ips(self):
        ips = []
        for host, opts in self.config.items('all'):
            ips.append(self.get_ip_from_opts(opts))
        print(' '.join(ips))


def main(argv=None):
    if not argv:
        argv = sys.argv[1:]
    KargoInventory(argv, CONFIG_FILE)

if __name__ == "__main__":
    sys.exit(main())
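For orientation, here is a minimal sketch of driving the builder from Python rather than the shell; it assumes `inventory.py` is importable from the working directory, and the `/tmp` path is purely illustrative:

```
# Build a three-node inventory programmatically; the /tmp path is
# illustrative, not part of the tool's defaults.
import inventory

inventory.KargoInventory(['10.10.1.3', '10.10.1.4', '10.10.1.5'],
                         '/tmp/inventory.cfg')
# Result: node1..node3 in [all], all three in [etcd], the first two in
# [kube-master], and all three in [kube-node], since the cluster is
# below the SCALE_THRESHOLD of 50.
```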
1
contrib/inventory_builder/requirements.txt
Normal file
@@ -0,0 +1 @@
configparser>=3.3.0
3
contrib/inventory_builder/setup.cfg
Normal file
@@ -0,0 +1,3 @@
[metadata]
name = kargo-inventory-builder
version = 0.1
29
contrib/inventory_builder/setup.py
Normal file
@@ -0,0 +1,29 @@
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

setuptools.setup(
    setup_requires=[],
    pbr=False)
3
contrib/inventory_builder/test-requirements.txt
Normal file
@@ -0,0 +1,3 @@
hacking>=0.10.2
pytest>=2.8.0
mock>=1.3.0
240
contrib/inventory_builder/tests/test_inventory.py
Normal file
@@ -0,0 +1,240 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import unittest

from collections import OrderedDict
import sys

path = "./contrib/inventory_builder/"
if path not in sys.path:
    sys.path.append(path)

import inventory


class TestInventory(unittest.TestCase):
    @mock.patch('inventory.sys')
    def setUp(self, sys_mock):
        sys_mock.exit = mock.Mock()
        super(TestInventory, self).setUp()
        self.data = ['10.90.3.2', '10.90.3.3', '10.90.3.4']
        self.inv = inventory.KargoInventory()

    def test_get_ip_from_opts(self):
        optstring = "ansible_host=10.90.3.2 ip=10.90.3.2"
        expected = "10.90.3.2"
        result = self.inv.get_ip_from_opts(optstring)
        self.assertEqual(expected, result)

    def test_get_ip_from_opts_invalid(self):
        optstring = "notanaddr=value something random!chars:D"
        self.assertRaisesRegexp(ValueError, "IP parameter not found",
                                self.inv.get_ip_from_opts, optstring)

    def test_ensure_required_groups(self):
        groups = ['group1', 'group2']
        self.inv.ensure_required_groups(groups)
        for group in groups:
            self.assertTrue(group in self.inv.config.sections())

    def test_get_host_id(self):
        hostnames = ['node99', 'no99de01', '01node01', 'node1.domain',
                     'node3.xyz123.aaa']
        expected_ids = [99, 1, 1, 1, 3]
        for hostname, expected in zip(hostnames, expected_ids):
            result = self.inv.get_host_id(hostname)
            self.assertEqual(expected, result)

    def test_get_host_id_invalid(self):
        bad_hostnames = ['node', 'no99de', '01node', 'node.111111']
        for hostname in bad_hostnames:
            self.assertRaisesRegexp(ValueError, "Host name must end in an",
                                    self.inv.get_host_id, hostname)

    def test_build_hostnames_add_one(self):
        changed_hosts = ['10.90.0.2']
        expected = OrderedDict([('node1',
                                 'ansible_host=10.90.0.2 ip=10.90.0.2')])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_duplicate(self):
        changed_hosts = ['10.90.0.2']
        expected = OrderedDict([('node1',
                                 'ansible_host=10.90.0.2 ip=10.90.0.2')])
        self.inv.config['all'] = expected
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_add_two(self):
        changed_hosts = ['10.90.0.2', '10.90.0.3']
        expected = OrderedDict([
            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
        self.inv.config['all'] = OrderedDict()
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_build_hostnames_delete_first(self):
        changed_hosts = ['-10.90.0.2']
        existing_hosts = OrderedDict([
            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
        self.inv.config['all'] = existing_hosts
        expected = OrderedDict([
            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
        result = self.inv.build_hostnames(changed_hosts)
        self.assertEqual(expected, result)

    def test_exists_hostname_positive(self):
        hostname = 'node1'
        expected = True
        existing_hosts = OrderedDict([
            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
        result = self.inv.exists_hostname(existing_hosts, hostname)
        self.assertEqual(expected, result)

    def test_exists_hostname_negative(self):
        hostname = 'node99'
        expected = False
        existing_hosts = OrderedDict([
            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
        result = self.inv.exists_hostname(existing_hosts, hostname)
        self.assertEqual(expected, result)

    def test_exists_ip_positive(self):
        ip = '10.90.0.2'
        expected = True
        existing_hosts = OrderedDict([
            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
        result = self.inv.exists_ip(existing_hosts, ip)
        self.assertEqual(expected, result)

    def test_exists_ip_negative(self):
        ip = '10.90.0.200'
        expected = False
        existing_hosts = OrderedDict([
            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
        result = self.inv.exists_ip(existing_hosts, ip)
        self.assertEqual(expected, result)

    def test_delete_host_by_ip_positive(self):
        ip = '10.90.0.2'
        expected = OrderedDict([
            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
        existing_hosts = OrderedDict([
            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
        self.inv.delete_host_by_ip(existing_hosts, ip)
        self.assertEqual(expected, existing_hosts)

    def test_delete_host_by_ip_negative(self):
        ip = '10.90.0.200'
        existing_hosts = OrderedDict([
            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3')])
        self.assertRaisesRegexp(ValueError, "Unable to find host",
                                self.inv.delete_host_by_ip, existing_hosts, ip)

    def test_purge_invalid_hosts(self):
        proper_hostnames = ['node1', 'node2']
        bad_host = 'doesnotbelong2'
        existing_hosts = OrderedDict([
            ('node1', 'ansible_host=10.90.0.2 ip=10.90.0.2'),
            ('node2', 'ansible_host=10.90.0.3 ip=10.90.0.3'),
            ('doesnotbelong2', 'whateveropts=ilike')])
        self.inv.config['all'] = existing_hosts
        self.inv.purge_invalid_hosts(proper_hostnames)
        self.assertTrue(bad_host not in self.inv.config['all'].keys())

    def test_add_host_to_group(self):
        group = 'etcd'
        host = 'node1'
        opts = 'ip=10.90.0.2'

        self.inv.add_host_to_group(group, host, opts)
        self.assertEqual(self.inv.config[group].get(host), opts)

    def test_set_kube_master(self):
        group = 'kube-master'
        host = 'node1'

        self.inv.set_kube_master([host])
        self.assertTrue(host in self.inv.config[group])

    def test_set_all(self):
        group = 'all'
        hosts = OrderedDict([
            ('node1', 'opt1'),
            ('node2', 'opt2')])

        self.inv.set_all(hosts)
        for host, opt in hosts.items():
            self.assertEqual(self.inv.config[group].get(host), opt)

    def test_set_k8s_cluster(self):
        group = 'k8s-cluster:children'
        expected_hosts = ['kube-node', 'kube-master']

        self.inv.set_k8s_cluster()
        for host in expected_hosts:
            self.assertTrue(host in self.inv.config[group])

    def test_set_kube_node(self):
        group = 'kube-node'
        host = 'node1'

        self.inv.set_kube_node([host])
        self.assertTrue(host in self.inv.config[group])

    def test_set_etcd(self):
        group = 'etcd'
        host = 'node1'

        self.inv.set_etcd([host])
        self.assertTrue(host in self.inv.config[group])

    def test_scale_scenario_one(self):
        num_nodes = 50
        hosts = OrderedDict()

        for hostid in range(1, num_nodes+1):
            hosts["node" + str(hostid)] = ""

        self.inv.set_all(hosts)
        # list() keeps these subscriptable on Python 3, where dict.keys()
        # returns a non-indexable view.
        self.inv.set_etcd(list(hosts.keys())[0:3])
        self.inv.set_kube_master(list(hosts.keys())[0:2])
        self.inv.set_kube_node(hosts.keys())
        for h in range(3):
            self.assertFalse(
                list(hosts.keys())[h] in self.inv.config['kube-node'])

    def test_scale_scenario_two(self):
        num_nodes = 500
        hosts = OrderedDict()

        for hostid in range(1, num_nodes+1):
            hosts["node" + str(hostid)] = ""

        self.inv.set_all(hosts)
        self.inv.set_etcd(list(hosts.keys())[0:3])
        self.inv.set_kube_master(list(hosts.keys())[3:5])
        self.inv.set_kube_node(hosts.keys())
        for h in range(5):
            self.assertFalse(
                list(hosts.keys())[h] in self.inv.config['kube-node'])
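As a companion to the two scale scenarios above, here is a plain-Python restatement of the grouping rules the builder applies; the node names are illustrative and no Ansible is required:

```
# Role split at the default thresholds (50 and 200), restated in plain
# Python; purely illustrative.
nodes = ["node{0}".format(i) for i in range(1, 51)]   # a 50-node cluster
etcd = nodes[:3]                  # etcd gets dedicated nodes at >= 50
masters = nodes[3:5]              # masters start after the etcd trio
kube_nodes = [n for n in nodes if n not in etcd]
# Below 200 nodes, masters still schedule workloads:
assert all(m in kube_nodes for m in masters)
```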
28
contrib/inventory_builder/tox.ini
Normal file
@@ -0,0 +1,28 @@
[tox]
minversion = 1.6
skipsdist = True
envlist = pep8, py27

[testenv]
whitelist_externals = py.test
usedevelop = True
deps =
    -r{toxinidir}/requirements.txt
    -r{toxinidir}/test-requirements.txt
setenv = VIRTUAL_ENV={envdir}
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
commands = pytest -vv #{posargs:./tests}

[testenv:pep8]
usedevelop = False
whitelist_externals = bash
commands =
    bash -c "find {toxinidir}/* -type f -name '*.py' -print0 | xargs -0 flake8"

[testenv:venv]
commands = {posargs}

[flake8]
show-source = true
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg
11
contrib/kvm-setup/README.md
Normal file
@@ -0,0 +1,11 @@
# Kargo on KVM Virtual Machines hypervisor preparation

A simple playbook to ensure your system has the right settings to enable Kargo
deployment on VMs.

This playbook does not create Virtual Machines, nor does it run Kargo itself.

### User creation

If you want to create a user for running Kargo deployment, you should specify
both `k8s_deployment_user` and `k8s_deployment_user_pkey_path`.
3
contrib/kvm-setup/group_vars/all
Normal file
@@ -0,0 +1,3 @@
#k8s_deployment_user: kargo
#k8s_deployment_user_pkey_path: /tmp/ssh_rsa
8
contrib/kvm-setup/kvm-setup.yml
Normal file
@@ -0,0 +1,8 @@
---
- hosts: localhost
  gather_facts: False
  become: yes
  vars:
    - bootstrap_os: none
  roles:
    - kvm-setup
46
contrib/kvm-setup/roles/kvm-setup/tasks/main.yml
Normal file
@@ -0,0 +1,46 @@
---

- name: Upgrade all packages to the latest version (yum)
  yum:
    name: '*'
    state: latest
  when: ansible_os_family == "RedHat"

- name: Install required packages
  yum:
    name: "{{ item }}"
    state: latest
  with_items:
    - bind-utils
    - ntp
  when: ansible_os_family == "RedHat"

- name: Install required packages
  apt:
    upgrade: yes
    update_cache: yes
    cache_valid_time: 3600
    name: "{{ item }}"
    state: latest
    install_recommends: no
  with_items:
    - dnsutils
    - ntp
  when: ansible_os_family == "Debian"

- name: Upgrade all packages to the latest version (apt)
  shell: apt-get -o \
           Dpkg::Options::=--force-confdef -o \
           Dpkg::Options::=--force-confold -q -y \
           dist-upgrade
  environment:
    DEBIAN_FRONTEND: noninteractive
  when: ansible_os_family == "Debian"


# Create deployment user if required
- include: user.yml
  when: k8s_deployment_user is defined

# Set proper sysctl values
- include: sysctl.yml
46
contrib/kvm-setup/roles/kvm-setup/tasks/sysctl.yml
Normal file
@@ -0,0 +1,46 @@
---
- name: Load br_netfilter module
  modprobe:
    name: br_netfilter
    state: present
  register: br_netfilter

- name: Add br_netfilter into /etc/modules
  lineinfile:
    dest: /etc/modules
    state: present
    line: 'br_netfilter'
  when: br_netfilter is defined and ansible_os_family == 'Debian'

- name: Add br_netfilter into /etc/modules-load.d/kargo.conf
  copy:
    dest: /etc/modules-load.d/kargo.conf
    content: |-
      ### This file is managed by Ansible
      br-netfilter
    owner: root
    group: root
    mode: 0644
  when: br_netfilter is defined


- name: Enable net.ipv4.ip_forward in sysctl
  sysctl:
    name: net.ipv4.ip_forward
    value: 1
    sysctl_file: /etc/sysctl.d/ipv4-ip_forward.conf
    state: present
    reload: yes

- name: Set bridge-nf-call-{arptables,iptables} to 0
  sysctl:
    name: "{{ item }}"
    state: present
    value: 0
    sysctl_file: /etc/sysctl.d/bridge-nf-call.conf
    reload: yes
  with_items:
    - net.bridge.bridge-nf-call-arptables
    - net.bridge.bridge-nf-call-ip6tables
    - net.bridge.bridge-nf-call-iptables
  when: br_netfilter is defined
46
contrib/kvm-setup/roles/kvm-setup/tasks/user.yml
Normal file
@@ -0,0 +1,46 @@
---
- name: Create user {{ k8s_deployment_user }}
  user:
    name: "{{ k8s_deployment_user }}"
    groups: adm
    shell: /bin/bash

- name: Ensure that .ssh exists
  file:
    path: "/home/{{ k8s_deployment_user }}/.ssh"
    state: directory
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"

- name: Configure sudo for deployment user
  copy:
    content: |
      %{{ k8s_deployment_user }} ALL=(ALL) NOPASSWD: ALL
    dest: "/etc/sudoers.d/55-k8s-deployment"
    owner: root
    group: root
    mode: 0644

- name: Write private SSH key
  copy:
    src: "{{ k8s_deployment_user_pkey_path }}"
    dest: "/home/{{ k8s_deployment_user }}/.ssh/id_rsa"
    mode: 0400
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"
  when: k8s_deployment_user_pkey_path is defined

- name: Write public SSH key
  shell: "ssh-keygen -y -f /home/{{ k8s_deployment_user }}/.ssh/id_rsa \
          > /home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
  args:
    creates: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
  when: k8s_deployment_user_pkey_path is defined

- name: Fix ssh-pub-key permissions
  file:
    path: "/home/{{ k8s_deployment_user }}/.ssh/authorized_keys"
    mode: 0600
    owner: "{{ k8s_deployment_user }}"
    group: "{{ k8s_deployment_user }}"
  when: k8s_deployment_user_pkey_path is defined
92
contrib/network-storage/glusterfs/README.md
Normal file
@@ -0,0 +1,92 @@
# Deploying a Kargo Kubernetes Cluster with GlusterFS

You can either deploy using Ansible on its own by supplying your own inventory file, or use Terraform to create the VMs and then provide a dynamic inventory to Ansible. The following two sections are self-contained; you don't need to go through one to use the other. So, if you want to provision with Terraform, you can skip the **Using an Ansible inventory** section, and if you want to provision with a pre-built Ansible inventory, you can skip the **Using Terraform and Ansible** section.

## Using an Ansible inventory

In the same directory as this README you should find a file named `inventory.example` which contains an example setup. Please note that, in addition to the Kubernetes nodes/masters, we define a set of machines for GlusterFS and add them to the group `[gfs-cluster]`, which in turn is added to the larger `[network-storage]` group as a child group.

Change that file to reflect your local setup (adding or removing machines and setting the appropriate IP addresses), and save it to `inventory/k8s_gfs_inventory`. Make sure that the settings in `inventory/group_vars/all.yml` make sense for your deployment. Then change to the kargo root folder and execute (assuming the machines all run Ubuntu):

```
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./cluster.yml
```

This will provision your Kubernetes cluster. Then, to provision and configure the GlusterFS cluster, from the same directory execute:

```
ansible-playbook -b --become-user=root -i inventory/k8s_gfs_inventory --user=ubuntu ./contrib/network-storage/glusterfs/glusterfs.yml
```

If your machines are not using Ubuntu, you need to change `--user=ubuntu` to the correct user. Alternatively, if your Kubernetes machines use one OS and your GlusterFS machines a different one, you can instead specify the `ansible_ssh_user=<correct-user>` variable in the inventory file that you just created, for each machine/VM:

```
k8s-master-1 ansible_ssh_host=192.168.0.147 ip=192.168.0.147 ansible_ssh_user=core
k8s-master-node-1 ansible_ssh_host=192.168.0.148 ip=192.168.0.148 ansible_ssh_user=core
k8s-master-node-2 ansible_ssh_host=192.168.0.146 ip=192.168.0.146 ansible_ssh_user=core
```

## Using Terraform and Ansible

The first step is to fill in a `my-kargo-gluster-cluster.tfvars` file with the specification desired for your cluster. An example with all required variables would look like:

```
cluster_name = "cluster1"
number_of_k8s_masters = "1"
number_of_k8s_masters_no_floating_ip = "2"
number_of_k8s_nodes_no_floating_ip = "0"
number_of_k8s_nodes = "0"
public_key_path = "~/.ssh/my-desired-key.pub"
image = "Ubuntu 16.04"
ssh_user = "ubuntu"
flavor_k8s_node = "node-flavor-id-in-your-openstack"
flavor_k8s_master = "master-flavor-id-in-your-openstack"
network_name = "k8s-network"
floatingip_pool = "net_external"

# GlusterFS variables
flavor_gfs_node = "gluster-flavor-id-in-your-openstack"
image_gfs = "Ubuntu 16.04"
number_of_gfs_nodes_no_floating_ip = "3"
gfs_volume_size_in_gb = "50"
ssh_user_gfs = "ubuntu"
```

As explained in the general terraform/openstack guide, you need to source your OpenStack credentials file, add your ssh key to the ssh-agent, and set up environment variables for terraform:

```
$ source ~/.stackrc
$ eval $(ssh-agent -s)
$ ssh-add ~/.ssh/my-desired-key
$ echo Setting up Terraform creds && \
  export TF_VAR_username=${OS_USERNAME} && \
  export TF_VAR_password=${OS_PASSWORD} && \
  export TF_VAR_tenant=${OS_TENANT_NAME} && \
  export TF_VAR_auth_url=${OS_AUTH_URL}
```

Then, from the kargo directory (the root of the Git checkout), issue the following terraform command to create the VMs for the cluster:

```
terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
```

This will create both your Kubernetes and Gluster VMs. Make sure that the Ansible file `contrib/terraform/openstack/group_vars/all.yml` includes any Ansible variable that you want to set up (like, for instance, the type of machine for bootstrapping).

Then, provision your Kubernetes (Kargo) cluster with the following Ansible call:

```
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./cluster.yml
```

Finally, provision the GlusterFS nodes and add the Persistent Volume setup for GlusterFS in Kubernetes through the following Ansible call:

```
ansible-playbook -b --become-user=root -i contrib/terraform/openstack/hosts ./contrib/network-storage/glusterfs/glusterfs.yml
```

If you need to destroy the cluster, you can run:

```
terraform destroy -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-kargo-gluster-cluster.tfvars contrib/terraform/openstack
```
17
contrib/network-storage/glusterfs/glusterfs.yml
Normal file
@@ -0,0 +1,17 @@
---
- hosts: all
  gather_facts: true

- hosts: gfs-cluster
  roles:
    - { role: glusterfs/server }

- hosts: k8s-cluster
  roles:
    - { role: glusterfs/client }

- hosts: kube-master[0]
  roles:
    - { role: kubernetes-pv/lib }
    - { role: kubernetes-pv }
44
contrib/network-storage/glusterfs/inventory.example
Normal file
@@ -0,0 +1,44 @@
# ## Configure 'ip' variable to bind kubernetes services on a
# ## different ip than the default iface
# node1 ansible_ssh_host=95.54.0.12 # ip=10.3.0.1
# node2 ansible_ssh_host=95.54.0.13 # ip=10.3.0.2
# node3 ansible_ssh_host=95.54.0.14 # ip=10.3.0.3
# node4 ansible_ssh_host=95.54.0.15 # ip=10.3.0.4
# node5 ansible_ssh_host=95.54.0.16 # ip=10.3.0.5
# node6 ansible_ssh_host=95.54.0.17 # ip=10.3.0.6
#
# ## GlusterFS nodes
# ## Set disk_volume_device_1 to desired device for gluster brick, if different to /dev/vdb (default).
# ## As in the previous case, you can set ip to give direct communication on internal IPs
# gfs_node1 ansible_ssh_host=95.54.0.18 # disk_volume_device_1=/dev/vdc ip=10.3.0.7
# gfs_node2 ansible_ssh_host=95.54.0.19 # disk_volume_device_1=/dev/vdc ip=10.3.0.8
# gfs_node3 ansible_ssh_host=95.54.0.20 # disk_volume_device_1=/dev/vdc ip=10.3.0.9

# [kube-master]
# node1
# node2

# [etcd]
# node1
# node2
# node3

# [kube-node]
# node2
# node3
# node4
# node5
# node6

# [k8s-cluster:children]
# kube-node
# kube-master

# [gfs-cluster]
# gfs_node1
# gfs_node2
# gfs_node3

# [network-storage:children]
# gfs-cluster
44
contrib/network-storage/glusterfs/roles/glusterfs/README.md
Normal file
@@ -0,0 +1,44 @@
# Ansible Role: GlusterFS

[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-glusterfs.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-glusterfs)

Installs and configures GlusterFS on Linux.

## Requirements

For GlusterFS to connect between servers, TCP ports `24007`, `24008`, and `24009`/`49152`+ (that port, plus one additional incremented port for each additional server in the cluster; the latter applies if GlusterFS is version 3.4+), and TCP/UDP port `111` must be open. You can open these using whatever firewall you wish (this can easily be configured using the `geerlingguy.firewall` role). The sketch below spells out the resulting port list.
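A hedged illustration of the port list implied by the paragraph above, for a small 3.4+ cluster; `n_servers` is a hypothetical value:

    # Ports to open for an n-server Gluster 3.4+ cluster, per the
    # requirements paragraph above; n_servers is hypothetical.
    n_servers = 3
    ports = [111, 24007, 24008] + list(range(49152, 49152 + n_servers))
    print(ports)  # [111, 24007, 24008, 49152, 49153, 49154]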

This role performs basic installation and setup of Gluster, but it does not configure or mount bricks (volumes), since that step is easier to do in a series of plays in your own playbook. Ansible 1.9+ includes the [`gluster_volume`](https://docs.ansible.com/gluster_volume_module.html) module to ease the management of Gluster volumes.

## Role Variables

Available variables are listed below, along with default values (see `defaults/main.yml`):

    glusterfs_default_release: ""

You can specify a `default_release` for apt on Debian/Ubuntu by overriding this variable. This is helpful if you need a different package or version for the main GlusterFS packages (e.g. GlusterFS 3.5.x instead of 3.2.x with the `wheezy-backports` default release on Debian Wheezy).

    glusterfs_ppa_use: yes
    glusterfs_ppa_version: "3.5"

For Ubuntu, specify whether to use the official Gluster PPA, and which version of the PPA to use. See Gluster's [Getting Started Guide](http://www.gluster.org/community/documentation/index.php/Getting_started_install) for more info.

## Dependencies

None.

## Example Playbook

    - hosts: server
      roles:
        - geerlingguy.glusterfs

For a real-world use example, read through [Simple GlusterFS Setup with Ansible](http://www.jeffgeerling.com/blog/simple-glusterfs-setup-ansible), a blog post by this role's author, which is included in Chapter 8 of [Ansible for DevOps](https://www.ansiblefordevops.com/).

## License

MIT / BSD

## Author Information

This role was created in 2015 by [Jeff Geerling](http://www.jeffgeerling.com/), author of [Ansible for DevOps](https://www.ansiblefordevops.com/).
@@ -0,0 +1,11 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.8"

# Gluster configuration.
gluster_mount_dir: /mnt/gluster
gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster
gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick"
gluster_brick_name: gluster
@@ -0,0 +1,30 @@
---
dependencies: []

galaxy_info:
  author: geerlingguy
  description: GlusterFS installation for Linux.
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: 2.0
  platforms:
    - name: EL
      versions:
        - 6
        - 7
    - name: Ubuntu
      versions:
        - precise
        - trusty
        - xenial
    - name: Debian
      versions:
        - wheezy
        - jessie
  galaxy_tags:
    - system
    - networking
    - cloud
    - clustering
    - files
    - sharing
@@ -0,0 +1,16 @@
---
# This is meant for Ubuntu and RedHat installations, where apparently the glusterfs-client is not used from inside
# hyperkube and needs to be installed as part of the system.

# Setup/install tasks.
- include: setup-RedHat.yml
  when: ansible_os_family == 'RedHat' and groups['gfs-cluster'] is defined

- include: setup-Debian.yml
  when: ansible_os_family == 'Debian' and groups['gfs-cluster'] is defined

- name: Ensure Gluster mount directories exist.
  file: "path={{ item }} state=directory mode=0775"
  with_items:
    - "{{ gluster_mount_dir }}"
  when: ansible_os_family in ["Debian","RedHat"] and groups['gfs-cluster'] is defined
@@ -0,0 +1,24 @@
---
- name: Add PPA for GlusterFS.
  apt_repository:
    repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
    state: present
    update_cache: yes
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use

- name: Ensure GlusterFS client will reinstall if the PPA was just added.
  apt:
    name: "{{ item }}"
    state: absent
  with_items:
    - glusterfs-client
  when: glusterfs_ppa_added.changed

- name: Ensure GlusterFS client is installed.
  apt:
    name: "{{ item }}"
    state: installed
    default_release: "{{ glusterfs_default_release }}"
  with_items:
    - glusterfs-client
@@ -0,0 +1,10 @@
---
- name: Install Prerequisites
  yum: name={{ item }} state=present
  with_items:
    - "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
  yum: name={{ item }} state=present
  with_items:
    - glusterfs-client
@@ -0,0 +1,13 @@
---
# For Ubuntu.
glusterfs_default_release: ""
glusterfs_ppa_use: yes
glusterfs_ppa_version: "3.8"

# Gluster configuration.
gluster_mount_dir: /mnt/gluster
gluster_volume_node_mount_dir: /mnt/xfs-drive-gluster
gluster_brick_dir: "{{ gluster_volume_node_mount_dir }}/brick"
gluster_brick_name: gluster
# Default device to mount for xfs formatting, terraform overrides this by setting the variable in the inventory.
disk_volume_device_1: /dev/vdb
@@ -0,0 +1,30 @@
---
dependencies: []

galaxy_info:
  author: geerlingguy
  description: GlusterFS installation for Linux.
  company: "Midwestern Mac, LLC"
  license: "license (BSD, MIT)"
  min_ansible_version: 2.0
  platforms:
    - name: EL
      versions:
        - 6
        - 7
    - name: Ubuntu
      versions:
        - precise
        - trusty
        - xenial
    - name: Debian
      versions:
        - wheezy
        - jessie
  galaxy_tags:
    - system
    - networking
    - cloud
    - clustering
    - files
    - sharing
@@ -0,0 +1,82 @@
---
# Include variables and define needed variables.
- name: Include OS-specific variables.
  include_vars: "{{ ansible_os_family }}.yml"

# Install xfs package
- name: install xfs Debian
  apt: name=xfsprogs state=present
  when: ansible_os_family == "Debian"

- name: install xfs RedHat
  yum: name=xfsprogs state=present
  when: ansible_os_family == "RedHat"

# Format external volumes in xfs
- name: Format volumes in xfs
  filesystem: "fstype=xfs dev={{ disk_volume_device_1 }}"

# Mount external volumes
- name: mounting new xfs filesystem
  mount: "name={{ gluster_volume_node_mount_dir }} src={{ disk_volume_device_1 }} fstype=xfs state=mounted"

# Setup/install tasks.
- include: setup-RedHat.yml
  when: ansible_os_family == 'RedHat'

- include: setup-Debian.yml
  when: ansible_os_family == 'Debian'

- name: Ensure GlusterFS is started and enabled at boot.
  service: "name={{ glusterfs_daemon }} state=started enabled=yes"

- name: Ensure Gluster brick and mount directories exist.
  file: "path={{ item }} state=directory mode=0775"
  with_items:
    - "{{ gluster_brick_dir }}"
    - "{{ gluster_mount_dir }}"

- name: Configure Gluster volume.
  gluster_volume:
    state: present
    name: "{{ gluster_brick_name }}"
    brick: "{{ gluster_brick_dir }}"
    replicas: "{{ groups['gfs-cluster'] | length }}"
    cluster: "{% for item in groups['gfs-cluster'] -%}{{ hostvars[item]['ip']|default(hostvars[item].ansible_default_ipv4['address']) }}{% if not loop.last %},{% endif %}{%- endfor %}"
    host: "{{ inventory_hostname }}"
    force: yes
  run_once: true

- name: Mount glusterfs to retrieve disk size
  mount:
    name: "{{ gluster_mount_dir }}"
    src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
    fstype: glusterfs
    opts: "defaults,_netdev"
    state: mounted
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Get Gluster disk size
  setup: filter=ansible_mounts
  register: mounts_data
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Set Gluster disk size to variable
  set_fact:
    gluster_disk_size_gb: "{{ (mounts_data.ansible_facts.ansible_mounts | selectattr('mount', 'equalto', gluster_mount_dir) | map(attribute='size_total') | first | int / (1024*1024*1024)) | int }}"
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Create file on GlusterFS
  template:
    dest: "{{ gluster_mount_dir }}/.test-file.txt"
    src: test-file.txt
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]

- name: Unmount glusterfs
  mount:
    name: "{{ gluster_mount_dir }}"
    fstype: glusterfs
    src: "{{ ip|default(ansible_default_ipv4['address']) }}:/gluster"
    state: unmounted
  when: groups['gfs-cluster'] is defined and inventory_hostname == groups['gfs-cluster'][0]
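The `cluster` expression in the `gluster_volume` task above packs a Jinja loop into one line; here is a plain-Python equivalent of what it produces (the `hostvars` dict is a hypothetical stand-in for Ansible's):

```
# Plain-Python equivalent of the Jinja "cluster" expression above: join
# each gfs node's `ip`, falling back to its default IPv4 address.
# hostvars is a hypothetical stand-in for Ansible's hostvars.
hostvars = {
    "gfs_node1": {"ip": "10.3.0.7"},
    "gfs_node2": {"ansible_default_ipv4": {"address": "95.54.0.19"}},
}
cluster = ",".join(
    hv.get("ip") or hv["ansible_default_ipv4"]["address"]
    for hv in hostvars.values()
)
print(cluster)  # -> 10.3.0.7,95.54.0.19
```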
@@ -0,0 +1,26 @@
---
- name: Add PPA for GlusterFS.
  apt_repository:
    repo: 'ppa:gluster/glusterfs-{{ glusterfs_ppa_version }}'
    state: present
    update_cache: yes
  register: glusterfs_ppa_added
  when: glusterfs_ppa_use

- name: Ensure GlusterFS will reinstall if the PPA was just added.
  apt:
    name: "{{ item }}"
    state: absent
  with_items:
    - glusterfs-server
    - glusterfs-client
  when: glusterfs_ppa_added.changed

- name: Ensure GlusterFS is installed.
  apt:
    name: "{{ item }}"
    state: installed
    default_release: "{{ glusterfs_default_release }}"
  with_items:
    - glusterfs-server
    - glusterfs-client
@@ -0,0 +1,11 @@
---
- name: Install Prerequisites
  yum: name={{ item }} state=present
  with_items:
    - "centos-release-gluster{{ glusterfs_default_release }}"

- name: Install Packages
  yum: name={{ item }} state=present
  with_items:
    - glusterfs-server
    - glusterfs-client
@@ -0,0 +1 @@
test file
@@ -0,0 +1,5 @@
---
- hosts: all

  roles:
    - role_under_test
@@ -0,0 +1,2 @@
---
glusterfs_daemon: glusterfs-server
@@ -0,0 +1,2 @@
---
glusterfs_daemon: glusterd
@@ -0,0 +1,19 @@
---
- name: Kubernetes Apps | Lay Down k8s GlusterFS Endpoint and PV
  template: src={{item.file}} dest={{kube_config_dir}}/{{item.dest}}
  with_items:
    - { file: glusterfs-kubernetes-endpoint.json.j2, type: ep, dest: glusterfs-kubernetes-endpoint.json}
    - { file: glusterfs-kubernetes-pv.yml.j2, type: pv, dest: glusterfs-kubernetes-pv.yml}
  register: gluster_pv
  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined and hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb is defined

- name: Kubernetes Apps | Set GlusterFS endpoint and PV
  kube:
    name: glusterfs
    namespace: default
    kubectl: "{{bin_dir}}/kubectl"
    resource: "{{item.item.type}}"
    filename: "{{kube_config_dir}}/{{item.item.dest}}"
    state: "{{item.changed | ternary('latest','present') }}"
  with_items: "{{ gluster_pv.results }}"
  when: inventory_hostname == groups['kube-master'][0] and groups['gfs-cluster'] is defined
@@ -0,0 +1,24 @@
{
  "kind": "Endpoints",
  "apiVersion": "v1",
  "metadata": {
    "name": "glusterfs"
  },
  "subsets": [
    {% for host in groups['gfs-cluster'] %}
    {
      "addresses": [
        {
          "ip": "{{hostvars[host]['ip']|default(hostvars[host].ansible_default_ipv4['address'])}}"
        }
      ],
      "ports": [
        {
          "port": 1
        }
      ]
    }{%- if not loop.last %}, {% endif -%}
    {% endfor %}
  ]
}
@@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: glusterfs
spec:
  capacity:
    storage: "{{ hostvars[groups['gfs-cluster'][0]].gluster_disk_size_gb }}Gi"
  accessModes:
    - ReadWriteMany
  glusterfs:
    endpoints: glusterfs
    path: gluster
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
@@ -0,0 +1,2 @@
dependencies:
  - {role: kubernetes-pv/ansible, tags: apps}
2
contrib/terraform/aws/.gitignore
vendored
@@ -1,2 +1,2 @@
 *.tfstate*
-inventory
+.terraform
@@ -1,261 +0,0 @@
variable "deploymentName" {
  type = "string"
  description = "The desired name of your deployment."
}

variable "numControllers" {
  type = "string"
  description = "Desired # of controllers."
}

variable "numEtcd" {
  type = "string"
  description = "Desired # of etcd nodes. Should be an odd number."
}

variable "numNodes" {
  type = "string"
  description = "Desired # of nodes."
}

variable "volSizeController" {
  type = "string"
  description = "Volume size for the controllers (GB)."
}

variable "volSizeEtcd" {
  type = "string"
  description = "Volume size for etcd (GB)."
}

variable "volSizeNodes" {
  type = "string"
  description = "Volume size for nodes (GB)."
}

variable "subnet" {
  type = "string"
  description = "The subnet in which to put your cluster."
}

variable "securityGroups" {
  type = "string"
  description = "The sec. groups in which to put your cluster."
}

variable "ami" {
  type = "string"
  description = "AMI to use for all VMs in cluster."
}

variable "SSHKey" {
  type = "string"
  description = "SSH key to use for VMs."
}

variable "master_instance_type" {
  type = "string"
  description = "Size of VM to use for masters."
}

variable "etcd_instance_type" {
  type = "string"
  description = "Size of VM to use for etcd."
}

variable "node_instance_type" {
  type = "string"
  description = "Size of VM to use for nodes."
}

variable "terminate_protect" {
  type = "string"
  default = "false"
}

variable "awsRegion" {
  type = "string"
}

provider "aws" {
  region = "${var.awsRegion}"
}

variable "iam_prefix" {
  type = "string"
  description = "Prefix name for IAM profiles"
}

resource "aws_iam_instance_profile" "kubernetes_master_profile" {
  name = "${var.iam_prefix}_kubernetes_master_profile"
  roles = ["${aws_iam_role.kubernetes_master_role.name}"]
}

resource "aws_iam_role" "kubernetes_master_role" {
  name = "${var.iam_prefix}_kubernetes_master_role"
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "ec2.amazonaws.com"},
      "Action": "sts:AssumeRole"
    }
  ]
}
EOF
}

resource "aws_iam_role_policy" "kubernetes_master_policy" {
  name = "${var.iam_prefix}_kubernetes_master_policy"
  role = "${aws_iam_role.kubernetes_master_role.id}"
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["ec2:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": ["elasticloadbalancing:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": "*"
    }
  ]
}
EOF
}

resource "aws_iam_instance_profile" "kubernetes_node_profile" {
  name = "${var.iam_prefix}_kubernetes_node_profile"
  roles = ["${aws_iam_role.kubernetes_node_role.name}"]
}

resource "aws_iam_role" "kubernetes_node_role" {
  name = "${var.iam_prefix}_kubernetes_node_role"
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": { "Service": "ec2.amazonaws.com"},
      "Action": "sts:AssumeRole"
    }
  ]
}
EOF
}

resource "aws_iam_role_policy" "kubernetes_node_policy" {
  name = "${var.iam_prefix}_kubernetes_node_policy"
  role = "${aws_iam_role.kubernetes_node_role.id}"
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:Describe*",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:AttachVolume",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:DetachVolume",
      "Resource": "*"
    }
  ]
}
EOF
}

resource "aws_instance" "master" {
  count = "${var.numControllers}"
  ami = "${var.ami}"
  instance_type = "${var.master_instance_type}"
  subnet_id = "${var.subnet}"
  vpc_security_group_ids = ["${var.securityGroups}"]
  key_name = "${var.SSHKey}"
  disable_api_termination = "${var.terminate_protect}"
  iam_instance_profile = "${aws_iam_instance_profile.kubernetes_master_profile.id}"
  root_block_device {
    volume_size = "${var.volSizeController}"
  }
  tags {
    Name = "${var.deploymentName}-master-${count.index + 1}"
  }
}

resource "aws_instance" "etcd" {
  count = "${var.numEtcd}"
  ami = "${var.ami}"
  instance_type = "${var.etcd_instance_type}"
  subnet_id = "${var.subnet}"
  vpc_security_group_ids = ["${var.securityGroups}"]
  key_name = "${var.SSHKey}"
  disable_api_termination = "${var.terminate_protect}"
  root_block_device {
    volume_size = "${var.volSizeEtcd}"
  }
  tags {
    Name = "${var.deploymentName}-etcd-${count.index + 1}"
  }
}

resource "aws_instance" "minion" {
  count = "${var.numNodes}"
  ami = "${var.ami}"
  instance_type = "${var.node_instance_type}"
  subnet_id = "${var.subnet}"
  vpc_security_group_ids = ["${var.securityGroups}"]
  key_name = "${var.SSHKey}"
  disable_api_termination = "${var.terminate_protect}"
  iam_instance_profile = "${aws_iam_instance_profile.kubernetes_node_profile.id}"
  root_block_device {
    volume_size = "${var.volSizeNodes}"
  }
  tags {
    Name = "${var.deploymentName}-minion-${count.index + 1}"
  }
}

output "kubernetes_master_profile" {
  value = "${aws_iam_instance_profile.kubernetes_master_profile.id}"
}

output "kubernetes_node_profile" {
  value = "${aws_iam_instance_profile.kubernetes_node_profile.id}"
}

output "master-ip" {
  value = "${join(", ", aws_instance.master.*.private_ip)}"
}

output "etcd-ip" {
  value = "${join(", ", aws_instance.etcd.*.private_ip)}"
}

output "minion-ip" {
  value = "${join(", ", aws_instance.minion.*.private_ip)}"
}
@@ -1,37 +0,0 @@
variable "SSHUser" {
  type = "string"
  description = "SSH User for VMs."
}

resource "null_resource" "ansible-provision" {

  depends_on = ["aws_instance.master","aws_instance.etcd","aws_instance.minion"]

  ##Create Master Inventory
  provisioner "local-exec" {
    command = "echo \"[kube-master]\" > inventory"
  }
  provisioner "local-exec" {
    command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.master.*.private_ip, var.SSHUser))}\" >> inventory"
  }

  ##Create ETCD Inventory
  provisioner "local-exec" {
    command = "echo \"\n[etcd]\" >> inventory"
  }
  provisioner "local-exec" {
    command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.etcd.*.private_ip, var.SSHUser))}\" >> inventory"
  }

  ##Create Nodes Inventory
  provisioner "local-exec" {
    command = "echo \"\n[kube-node]\" >> inventory"
  }
  provisioner "local-exec" {
    command = "echo \"${join("\n",formatlist("%s ansible_ssh_user=%s", aws_instance.minion.*.private_ip, var.SSHUser))}\" >> inventory"
  }

  provisioner "local-exec" {
    command = "echo \"\n[k8s-cluster:children]\nkube-node\nkube-master\netcd\" >> inventory"
  }
}
@@ -2,27 +2,34 @@
 
 **Overview:**
 
-- This will create nodes in a VPC inside of AWS
-
-- A dynamic number of masters, etcd, and nodes can be created
-- These scripts currently expect Private IP connectivity with the nodes that are created. This means that you may need a tunnel to your VPC or to run these scripts from a VM inside the VPC. Will be looking into how to work around this later.
+This project will create:
+* VPC with Public and Private Subnets in # Availability Zones
+* Bastion Hosts and NAT Gateways in the Public Subnet
+* A dynamic number of masters, etcd, and worker nodes in the Private Subnet, evenly distributed over the Availability Zones
+* AWS ELB in the Public Subnet for accessing the Kubernetes API from the internet
+
+**Requirements**
+- Terraform 0.8.7 or newer
 
 **How to Use:**
 
-- Export the variables for your Amazon credentials:
+- Export the variables for your AWS credentials, or edit credentials.tfvars:
 
 ```
-export AWS_ACCESS_KEY_ID="xxx"
-export AWS_SECRET_ACCESS_KEY="yyy"
+export aws_access_key="xxx"
+export aws_secret_key="yyy"
+export aws_ssh_key_name="zzz"
 ```
 
 - Update contrib/terraform/aws/terraform.tfvars with your data
 
-- Run with `terraform apply`
+- Run with `terraform apply -var-file="credentials.tfvars"`, or with plain `terraform apply` if you exported your AWS credentials
 
-- Once the infrastructure is created, you can run the kubespray playbooks and supply contrib/terraform/aws/inventory with the `-i` flag.
+- Once the infrastructure is created, you can run the kargo playbooks and supply inventory/hosts with the `-i` flag.
 
-**Future Work:**
+**Architecture**
 
-- Update the inventory creation file to be something a little more reasonable. It's just a local-exec from Terraform now; using terraform.py or something similar may make sense in the future.
+Pictured is an AWS infrastructure created with this Terraform project, distributed over two Availability Zones.
+
+![AWS Infrastructure with Terraform](docs/aws_kargo.png)
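A minimal end-to-end run of this flow might look like the following sketch; it assumes you copied `credentials.tfvars.example` to `credentials.tfvars`, that `cluster.yml` is the top-level kargo playbook, and that the inventory lands in `inventory/hosts` as the provisioner below writes it:

```
cd contrib/terraform/aws
terraform apply -var-file="credentials.tfvars"
cd ../../..
# inventory/hosts is written by the null_resource "inventories" provisioner
ansible-playbook -i inventory/hosts -b cluster.yml
```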
185 contrib/terraform/aws/create-infrastructure.tf Normal file
@@ -0,0 +1,185 @@
terraform {
  required_version = ">= 0.8.7"
}

provider "aws" {
  access_key = "${var.AWS_ACCESS_KEY_ID}"
  secret_key = "${var.AWS_SECRET_ACCESS_KEY}"
  region = "${var.AWS_DEFAULT_REGION}"
}

/*
* Calling modules that create the initial AWS VPC / AWS ELB
* and AWS IAM Roles for the Kubernetes deployment
*/

module "aws-vpc" {
  source = "modules/vpc"

  aws_cluster_name = "${var.aws_cluster_name}"
  aws_vpc_cidr_block = "${var.aws_vpc_cidr_block}"
  aws_avail_zones = "${var.aws_avail_zones}"

  aws_cidr_subnets_private = "${var.aws_cidr_subnets_private}"
  aws_cidr_subnets_public = "${var.aws_cidr_subnets_public}"
}

module "aws-elb" {
  source = "modules/elb"

  aws_cluster_name = "${var.aws_cluster_name}"
  aws_vpc_id = "${module.aws-vpc.aws_vpc_id}"
  aws_avail_zones = "${var.aws_avail_zones}"
  aws_subnet_ids_public = "${module.aws-vpc.aws_subnet_ids_public}"
  aws_elb_api_port = "${var.aws_elb_api_port}"
  k8s_secure_api_port = "${var.k8s_secure_api_port}"
}

module "aws-iam" {
  source = "modules/iam"

  aws_cluster_name = "${var.aws_cluster_name}"
}

/*
* Create Bastion Instances in AWS
*/
resource "aws_instance" "bastion-server" {
  ami = "${var.aws_bastion_ami}"
  instance_type = "${var.aws_bastion_size}"
  count = "${length(var.aws_cidr_subnets_public)}"
  associate_public_ip_address = true
  availability_zone = "${element(var.aws_avail_zones,count.index)}"
  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_public,count.index)}"

  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]

  key_name = "${var.AWS_SSH_KEY_NAME}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-bastion-${count.index}"
    Cluster = "${var.aws_cluster_name}"
    Role = "bastion-${var.aws_cluster_name}-${count.index}"
  }
}

/*
* Create K8s master and worker nodes and etcd instances
*/
resource "aws_instance" "k8s-master" {
  ami = "${var.aws_cluster_ami}"
  instance_type = "${var.aws_kube_master_size}"

  count = "${var.aws_kube_master_num}"

  availability_zone = "${element(var.aws_avail_zones,count.index)}"
  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"

  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]

  iam_instance_profile = "${module.aws-iam.kube-master-profile}"
  key_name = "${var.AWS_SSH_KEY_NAME}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-master${count.index}"
    Cluster = "${var.aws_cluster_name}"
    Role = "master"
  }
}

resource "aws_elb_attachment" "attach_master_nodes" {
  count = "${var.aws_kube_master_num}"
  elb = "${module.aws-elb.aws_elb_api_id}"
  instance = "${element(aws_instance.k8s-master.*.id,count.index)}"
}

resource "aws_instance" "k8s-etcd" {
  ami = "${var.aws_cluster_ami}"
  instance_type = "${var.aws_etcd_size}"

  count = "${var.aws_etcd_num}"

  availability_zone = "${element(var.aws_avail_zones,count.index)}"
  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"

  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]

  key_name = "${var.AWS_SSH_KEY_NAME}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-etcd${count.index}"
    Cluster = "${var.aws_cluster_name}"
    Role = "etcd"
  }
}

resource "aws_instance" "k8s-worker" {
  ami = "${var.aws_cluster_ami}"
  instance_type = "${var.aws_kube_worker_size}"

  count = "${var.aws_kube_worker_num}"

  availability_zone = "${element(var.aws_avail_zones,count.index)}"
  subnet_id = "${element(module.aws-vpc.aws_subnet_ids_private,count.index)}"

  vpc_security_group_ids = [ "${module.aws-vpc.aws_security_group}" ]

  iam_instance_profile = "${module.aws-iam.kube-worker-profile}"
  key_name = "${var.AWS_SSH_KEY_NAME}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-worker${count.index}"
    Cluster = "${var.aws_cluster_name}"
    Role = "worker"
  }
}

/*
* Create Kargo Inventory File
*/
data "template_file" "inventory" {
  template = "${file("${path.module}/templates/inventory.tpl")}"

  vars {
    public_ip_address_bastion = "${join("\n",formatlist("bastion ansible_ssh_host=%s" , aws_instance.bastion-server.*.public_ip))}"
    connection_strings_master = "${join("\n",formatlist("%s ansible_ssh_host=%s",aws_instance.k8s-master.*.tags.Name, aws_instance.k8s-master.*.private_ip))}"
    connection_strings_node = "${join("\n", formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-worker.*.tags.Name, aws_instance.k8s-worker.*.private_ip))}"
    connection_strings_etcd = "${join("\n",formatlist("%s ansible_ssh_host=%s", aws_instance.k8s-etcd.*.tags.Name, aws_instance.k8s-etcd.*.private_ip))}"
    list_master = "${join("\n",aws_instance.k8s-master.*.tags.Name)}"
    list_node = "${join("\n",aws_instance.k8s-worker.*.tags.Name)}"
    list_etcd = "${join("\n",aws_instance.k8s-etcd.*.tags.Name)}"
    elb_api_fqdn = "apiserver_loadbalancer_domain_name=\"${module.aws-elb.aws_elb_api_fqdn}\""
    elb_api_port = "loadbalancer_apiserver.port=${var.aws_elb_api_port}"
  }
}

resource "null_resource" "inventories" {
  provisioner "local-exec" {
    command = "echo '${data.template_file.inventory.rendered}' > ../../../inventory/hosts"
  }
}
8 contrib/terraform/aws/credentials.tfvars.example Normal file
@@ -0,0 +1,8 @@
#AWS Access Key
AWS_ACCESS_KEY_ID = ""
#AWS Secret Key
AWS_SECRET_ACCESS_KEY = ""
#EC2 SSH Key Name
AWS_SSH_KEY_NAME = ""
#AWS Region
AWS_DEFAULT_REGION = "eu-central-1"
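To use this file rather than environment variables, copy the example and pass it explicitly (a sketch; the copied filename is an assumption):

```
cp credentials.tfvars.example credentials.tfvars
# fill in the values, then:
terraform plan -var-file="credentials.tfvars"
```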
BIN contrib/terraform/aws/docs/aws_kargo.png Normal file (binary file not shown; 114 KiB)
58 contrib/terraform/aws/modules/elb/main.tf Normal file
@@ -0,0 +1,58 @@
resource "aws_security_group" "aws-elb" {
  name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
  vpc_id = "${var.aws_vpc_id}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-securitygroup-elb"
  }
}

resource "aws_security_group_rule" "aws-allow-api-access" {
  type = "ingress"
  from_port = "${var.aws_elb_api_port}"
  to_port = "${var.k8s_secure_api_port}"
  protocol = "TCP"
  cidr_blocks = ["0.0.0.0/0"]
  security_group_id = "${aws_security_group.aws-elb.id}"
}

resource "aws_security_group_rule" "aws-allow-api-egress" {
  type = "egress"
  from_port = 0
  to_port = 65535
  protocol = "TCP"
  cidr_blocks = ["0.0.0.0/0"]
  security_group_id = "${aws_security_group.aws-elb.id}"
}

# Create a new AWS ELB for K8S API
resource "aws_elb" "aws-elb-api" {
  name = "kubernetes-elb-${var.aws_cluster_name}"
  subnets = ["${var.aws_subnet_ids_public}"]
  security_groups = ["${aws_security_group.aws-elb.id}"]

  listener {
    instance_port = "${var.k8s_secure_api_port}"
    instance_protocol = "tcp"
    lb_port = "${var.aws_elb_api_port}"
    lb_protocol = "tcp"
  }

  health_check {
    healthy_threshold = 2
    unhealthy_threshold = 2
    timeout = 3
    target = "HTTP:8080/"
    interval = 30
  }

  cross_zone_load_balancing = true
  idle_timeout = 400
  connection_draining = true
  connection_draining_timeout = 400

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-elb-api"
  }
}
7 contrib/terraform/aws/modules/elb/outputs.tf Normal file
@@ -0,0 +1,7 @@
output "aws_elb_api_id" {
  value = "${aws_elb.aws-elb-api.id}"
}

output "aws_elb_api_fqdn" {
  value = "${aws_elb.aws-elb-api.dns_name}"
}
28 contrib/terraform/aws/modules/elb/variables.tf Normal file
@@ -0,0 +1,28 @@
variable "aws_cluster_name" {
  description = "Name of Cluster"
}

variable "aws_vpc_id" {
  description = "AWS VPC ID"
}

variable "aws_elb_api_port" {
  description = "Port for AWS ELB"
}

variable "k8s_secure_api_port" {
  description = "Secure Port of K8S API Server"
}

variable "aws_avail_zones" {
  description = "Availability Zones Used"
  type = "list"
}

variable "aws_subnet_ids_public" {
  description = "IDs of Public Subnets"
  type = "list"
}
138 contrib/terraform/aws/modules/iam/main.tf Normal file
@@ -0,0 +1,138 @@
#Add AWS Roles for Kubernetes

resource "aws_iam_role" "kube-master" {
  name = "kubernetes-${var.aws_cluster_name}-master"
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "sts:AssumeRole",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      }
    }
  ]
}
EOF
}

resource "aws_iam_role" "kube-worker" {
  name = "kubernetes-${var.aws_cluster_name}-node"
  assume_role_policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "sts:AssumeRole",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      }
    }
  ]
}
EOF
}

#Add AWS Policies for Kubernetes

resource "aws_iam_role_policy" "kube-master" {
  name = "kubernetes-${var.aws_cluster_name}-master"
  role = "${aws_iam_role.kube-master.id}"
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["ec2:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": ["elasticloadbalancing:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": ["route53:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": [
        "arn:aws:s3:::kubernetes-*"
      ]
    }
  ]
}
EOF
}

resource "aws_iam_role_policy" "kube-worker" {
  name = "kubernetes-${var.aws_cluster_name}-node"
  role = "${aws_iam_role.kube-worker.id}"
  policy = <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": "s3:*",
      "Resource": [
        "arn:aws:s3:::kubernetes-*"
      ]
    },
    {
      "Effect": "Allow",
      "Action": "ec2:Describe*",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:AttachVolume",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": "ec2:DetachVolume",
      "Resource": "*"
    },
    {
      "Effect": "Allow",
      "Action": ["route53:*"],
      "Resource": ["*"]
    },
    {
      "Effect": "Allow",
      "Action": [
        "ecr:GetAuthorizationToken",
        "ecr:BatchCheckLayerAvailability",
        "ecr:GetDownloadUrlForLayer",
        "ecr:GetRepositoryPolicy",
        "ecr:DescribeRepositories",
        "ecr:ListImages",
        "ecr:BatchGetImage"
      ],
      "Resource": "*"
    }
  ]
}
EOF
}

#Create AWS Instance Profiles

resource "aws_iam_instance_profile" "kube-master" {
  name = "kube_${var.aws_cluster_name}_master_profile"
  roles = ["${aws_iam_role.kube-master.name}"]
}

resource "aws_iam_instance_profile" "kube-worker" {
  name = "kube_${var.aws_cluster_name}_node_profile"
  roles = ["${aws_iam_role.kube-worker.name}"]
}
7 contrib/terraform/aws/modules/iam/outputs.tf Normal file
@@ -0,0 +1,7 @@
output "kube-master-profile" {
  value = "${aws_iam_instance_profile.kube-master.name}"
}

output "kube-worker-profile" {
  value = "${aws_iam_instance_profile.kube-worker.name}"
}
3 contrib/terraform/aws/modules/iam/variables.tf Normal file
@@ -0,0 +1,3 @@
variable "aws_cluster_name" {
  description = "Name of Cluster"
}
138 contrib/terraform/aws/modules/vpc/main.tf Normal file
@@ -0,0 +1,138 @@
resource "aws_vpc" "cluster-vpc" {
  cidr_block = "${var.aws_vpc_cidr_block}"

  #DNS Related Entries
  enable_dns_support = true
  enable_dns_hostnames = true

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-vpc"
  }
}

resource "aws_eip" "cluster-nat-eip" {
  count = "${length(var.aws_cidr_subnets_public)}"
  vpc = true
}

resource "aws_internet_gateway" "cluster-vpc-internetgw" {
  vpc_id = "${aws_vpc.cluster-vpc.id}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-internetgw"
  }
}

resource "aws_subnet" "cluster-vpc-subnets-public" {
  vpc_id = "${aws_vpc.cluster-vpc.id}"
  count = "${length(var.aws_avail_zones)}"
  availability_zone = "${element(var.aws_avail_zones, count.index)}"
  cidr_block = "${element(var.aws_cidr_subnets_public, count.index)}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-public"
  }
}

resource "aws_nat_gateway" "cluster-nat-gateway" {
  count = "${length(var.aws_cidr_subnets_public)}"
  allocation_id = "${element(aws_eip.cluster-nat-eip.*.id, count.index)}"
  subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id, count.index)}"
}

resource "aws_subnet" "cluster-vpc-subnets-private" {
  vpc_id = "${aws_vpc.cluster-vpc.id}"
  count = "${length(var.aws_avail_zones)}"
  availability_zone = "${element(var.aws_avail_zones, count.index)}"
  cidr_block = "${element(var.aws_cidr_subnets_private, count.index)}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-${element(var.aws_avail_zones, count.index)}-private"
  }
}

#Routing in VPC

#TODO: Do we need two routing tables for each subnet for redundancy, or is one enough?

resource "aws_route_table" "kubernetes-public" {
  vpc_id = "${aws_vpc.cluster-vpc.id}"
  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = "${aws_internet_gateway.cluster-vpc-internetgw.id}"
  }
  tags {
    Name = "kubernetes-${var.aws_cluster_name}-routetable-public"
  }
}

resource "aws_route_table" "kubernetes-private" {
  count = "${length(var.aws_cidr_subnets_private)}"
  vpc_id = "${aws_vpc.cluster-vpc.id}"
  route {
    cidr_block = "0.0.0.0/0"
    nat_gateway_id = "${element(aws_nat_gateway.cluster-nat-gateway.*.id, count.index)}"
  }
  tags {
    Name = "kubernetes-${var.aws_cluster_name}-routetable-private-${count.index}"
  }
}

resource "aws_route_table_association" "kubernetes-public" {
  count = "${length(var.aws_cidr_subnets_public)}"
  subnet_id = "${element(aws_subnet.cluster-vpc-subnets-public.*.id,count.index)}"
  route_table_id = "${aws_route_table.kubernetes-public.id}"
}

resource "aws_route_table_association" "kubernetes-private" {
  count = "${length(var.aws_cidr_subnets_private)}"
  subnet_id = "${element(aws_subnet.cluster-vpc-subnets-private.*.id,count.index)}"
  route_table_id = "${element(aws_route_table.kubernetes-private.*.id,count.index)}"
}

#Kubernetes Security Groups

resource "aws_security_group" "kubernetes" {
  name = "kubernetes-${var.aws_cluster_name}-securitygroup"
  vpc_id = "${aws_vpc.cluster-vpc.id}"

  tags {
    Name = "kubernetes-${var.aws_cluster_name}-securitygroup"
  }
}

resource "aws_security_group_rule" "allow-all-ingress" {
  type = "ingress"
  from_port = 0
  to_port = 65535
  protocol = "-1"
  cidr_blocks = ["${var.aws_vpc_cidr_block}"]
  security_group_id = "${aws_security_group.kubernetes.id}"
}

resource "aws_security_group_rule" "allow-all-egress" {
  type = "egress"
  from_port = 0
  to_port = 65535
  protocol = "-1"
  cidr_blocks = ["0.0.0.0/0"]
  security_group_id = "${aws_security_group.kubernetes.id}"
}

resource "aws_security_group_rule" "allow-ssh-connections" {
  type = "ingress"
  from_port = 22
  to_port = 22
  protocol = "TCP"
  cidr_blocks = ["0.0.0.0/0"]
  security_group_id = "${aws_security_group.kubernetes.id}"
}
16 contrib/terraform/aws/modules/vpc/outputs.tf Normal file
@@ -0,0 +1,16 @@
output "aws_vpc_id" {
  value = "${aws_vpc.cluster-vpc.id}"
}

output "aws_subnet_ids_private" {
  value = ["${aws_subnet.cluster-vpc-subnets-private.*.id}"]
}

output "aws_subnet_ids_public" {
  value = ["${aws_subnet.cluster-vpc-subnets-public.*.id}"]
}

output "aws_security_group" {
  value = ["${aws_security_group.kubernetes.*.id}"]
}
24 contrib/terraform/aws/modules/vpc/variables.tf Normal file
@@ -0,0 +1,24 @@
variable "aws_vpc_cidr_block" {
  description = "CIDR Blocks for AWS VPC"
}

variable "aws_cluster_name" {
  description = "Name of Cluster"
}

variable "aws_avail_zones" {
  description = "AWS Availability Zones Used"
  type = "list"
}

variable "aws_cidr_subnets_private" {
  description = "CIDR Blocks for private subnets in Availability Zones"
  type = "list"
}

variable "aws_cidr_subnets_public" {
  description = "CIDR Blocks for public subnets in Availability Zones"
  type = "list"
}
20 contrib/terraform/aws/output.tf Normal file
@@ -0,0 +1,20 @@
output "bastion_ip" {
  value = "${join("\n", aws_instance.bastion-server.*.public_ip)}"
}

output "masters" {
  value = "${join("\n", aws_instance.k8s-master.*.private_ip)}"
}

output "workers" {
  value = "${join("\n", aws_instance.k8s-worker.*.private_ip)}"
}

output "etcd" {
  value = "${join("\n", aws_instance.k8s-etcd.*.private_ip)}"
}

output "aws_elb_api_fqdn" {
  value = "${module.aws-elb.aws_elb_api_fqdn}:${var.aws_elb_api_port}"
}
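Once applied, these outputs can be read back individually with the standard Terraform CLI; for example:

```
# print the ELB endpoint for the Kubernetes API
terraform output aws_elb_api_fqdn

# print the bastion public IP(s)
terraform output bastion_ip
```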
27 contrib/terraform/aws/templates/inventory.tpl Normal file
@@ -0,0 +1,27 @@
${connection_strings_master}
${connection_strings_node}
${connection_strings_etcd}

${public_ip_address_bastion}

[kube-master]
${list_master}

[kube-node]
${list_node}

[etcd]
${list_etcd}

[k8s-cluster:children]
kube-node
kube-master

[k8s-cluster:vars]
${elb_api_fqdn}
${elb_api_port}
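Rendered against a small `devtest` cluster, the template above would produce an Ansible inventory along these lines; the host names, IPs, and ELB FQDN below are hypothetical:

```
kubernetes-devtest-master0 ansible_ssh_host=10.250.196.10
kubernetes-devtest-worker0 ansible_ssh_host=10.250.196.20
kubernetes-devtest-etcd0 ansible_ssh_host=10.250.196.30

bastion ansible_ssh_host=203.0.113.5

[kube-master]
kubernetes-devtest-master0

[kube-node]
kubernetes-devtest-worker0

[etcd]
kubernetes-devtest-etcd0

[k8s-cluster:children]
kube-node
kube-master

[k8s-cluster:vars]
apiserver_loadbalancer_domain_name="kubernetes-elb-devtest-1234.eu-central-1.elb.amazonaws.com"
loadbalancer_apiserver.port=443
```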
@@ -1,22 +1,31 @@
-deploymentName="test-kube-deploy"
+#Global Vars
+aws_cluster_name = "devtest"
 
-numControllers="2"
-numEtcd="3"
-numNodes="2"
+#VPC Vars
+aws_vpc_cidr_block = "10.250.192.0/18"
+aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
+aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
+aws_avail_zones = ["eu-central-1a","eu-central-1b"]
 
-volSizeController="20"
-volSizeEtcd="20"
-volSizeNodes="20"
+#Bastion Host
+aws_bastion_ami = "ami-5900cc36"
+aws_bastion_size = "t2.small"
 
-awsRegion="us-west-2"
-subnet="subnet-xxxxx"
-ami="ami-32a85152"
-securityGroups="sg-xxxxx"
-SSHUser="core"
-SSHKey="my-key"
-
-master_instance_type="m3.xlarge"
-etcd_instance_type="m3.xlarge"
-node_instance_type="m3.xlarge"
-
-terminate_protect="false"
+#Kubernetes Cluster
+aws_kube_master_num = 3
+aws_kube_master_size = "t2.medium"
+
+aws_etcd_num = 3
+aws_etcd_size = "t2.medium"
+
+aws_kube_worker_num = 4
+aws_kube_worker_size = "t2.medium"
+
+aws_cluster_ami = "ami-903df7ff"
+
+#Settings AWS ELB
+
+aws_elb_api_port = 443
+k8s_secure_api_port = 443
32 contrib/terraform/aws/terraform.tfvars.example Normal file
@@ -0,0 +1,32 @@
#Global Vars
aws_cluster_name = "devtest"
aws_region = "eu-central-1"

#VPC Vars
aws_vpc_cidr_block = "10.250.192.0/18"
aws_cidr_subnets_private = ["10.250.192.0/20","10.250.208.0/20"]
aws_cidr_subnets_public = ["10.250.224.0/20","10.250.240.0/20"]
aws_avail_zones = ["eu-central-1a","eu-central-1b"]

#Bastion Host
aws_bastion_ami = "ami-5900cc36"
aws_bastion_size = "t2.small"

#Kubernetes Cluster

aws_kube_master_num = 3
aws_kube_master_size = "t2.medium"

aws_etcd_num = 3
aws_etcd_size = "t2.medium"

aws_kube_worker_num = 4
aws_kube_worker_size = "t2.medium"

aws_cluster_ami = "ami-903df7ff"

#Settings AWS ELB

aws_elb_api_port = 443
k8s_secure_api_port = 443
97 contrib/terraform/aws/variables.tf Normal file
@@ -0,0 +1,97 @@
variable "AWS_ACCESS_KEY_ID" {
  description = "AWS Access Key"
}

variable "AWS_SECRET_ACCESS_KEY" {
  description = "AWS Secret Key"
}

variable "AWS_SSH_KEY_NAME" {
  description = "Name of the SSH keypair to use in AWS."
}

variable "AWS_DEFAULT_REGION" {
  description = "AWS Region"
}

//General Cluster Settings

variable "aws_cluster_name" {
  description = "Name of AWS Cluster"
}

//AWS VPC Variables

variable "aws_vpc_cidr_block" {
  description = "CIDR Block for VPC"
}

variable "aws_avail_zones" {
  description = "Availability Zones Used"
  type = "list"
}

variable "aws_cidr_subnets_private" {
  description = "CIDR Blocks for private subnets in Availability Zones"
  type = "list"
}

variable "aws_cidr_subnets_public" {
  description = "CIDR Blocks for public subnets in Availability Zones"
  type = "list"
}

//AWS EC2 Settings

variable "aws_bastion_ami" {
  description = "AMI ID for Bastion Host in chosen AWS Region"
}

variable "aws_bastion_size" {
  description = "EC2 Instance Size of Bastion Host"
}

/*
* AWS EC2 Settings
* The number should be divisible by the number of
* AWS Availability Zones used, without a remainder.
*/
variable "aws_kube_master_num" {
  description = "Number of Kubernetes Master Nodes"
}

variable "aws_kube_master_size" {
  description = "Instance size of Kube Master Nodes"
}

variable "aws_etcd_num" {
  description = "Number of etcd Nodes"
}

variable "aws_etcd_size" {
  description = "Instance size of etcd Nodes"
}

variable "aws_kube_worker_num" {
  description = "Number of Kubernetes Worker Nodes"
}

variable "aws_kube_worker_size" {
  description = "Instance size of Kubernetes Worker Nodes"
}

variable "aws_cluster_ami" {
  description = "AMI ID for Kubernetes Cluster"
}

/*
* AWS ELB Settings
*/
variable "aws_elb_api_port" {
  description = "Port for AWS ELB"
}

variable "k8s_secure_api_port" {
  description = "Secure Port of K8S API Server"
}
1 contrib/terraform/group_vars Symbolic link
@@ -0,0 +1 @@
../../inventory/group_vars
@@ -5,14 +5,13 @@ Openstack.
 
 ## Status
 
-This will install a Kubernetes cluster on an Openstack Cloud. It is tested on a
-OpenStack Cloud provided by [BlueBox](https://www.blueboxcloud.com/) and
-should work on most modern installs of OpenStack that support the basic
+This will install a Kubernetes cluster on an Openstack Cloud. It has been tested on an
+OpenStack Cloud provided by [BlueBox](https://www.blueboxcloud.com/) and on OpenStack at [EMBL-EBI's](http://www.ebi.ac.uk/) [EMBASSY Cloud](http://www.embassycloud.org/). This should work on most modern installs of OpenStack that support the basic
 services.
 
 There are some assumptions made to try and ensure it will work on your openstack cluster.
 
-* floating-ips are used for access
+* floating-ips are used for access, but you can have masters and nodes that don't use floating-ips if needed. You currently need at least 1 floating ip, which we suggest is used on a master.
 * you already have a suitable OS image in glance
 * you already have both an internal network and a floating-ip pool created
 * you have security-groups enabled
@@ -24,16 +23,14 @@ There are some assumptions made to try and ensure it will work on your openstack
 
 ## Terraform
 
-Terraform will be used to provision all of the OpenStack resources required to
-run Docker Swarm. It is also used to deploy and provision the software
+Terraform will be used to provision all of the OpenStack resources. It is also used to deploy and provision the software
 requirements.
 
 ### Prep
 
 #### OpenStack
 
-Ensure your OpenStack credentials are loaded in environment variables. This is
-how I do it:
+Ensure your OpenStack **Identity v2** credentials are loaded in environment variables. This can be done by downloading a credentials .rc file from your OpenStack dashboard and sourcing it:
 
 ```
 $ source ~/.stackrc
@@ -46,7 +43,7 @@ differences between OpenStack installs the Terraform does not attempt to create
 these for you.
 
 By default Terraform will expect that your networks are called `internal` and
-`external`. You can change this by altering the Terraform variables `network_name` and `floatingip_pool`.
+`external`. You can change this by altering the Terraform variables `network_name` and `floatingip_pool`. This can be done in a new variables file or through environment variables.
 
 A full list of variables you can change can be found at [variables.tf](variables.tf).
 
@@ -76,8 +73,36 @@ $ echo Setting up Terraform creds && \
   export TF_VAR_auth_url=${OS_AUTH_URL}
 ```
 
+If you want to provision master or node VMs that don't use floating ips, write a `my-terraform-vars.tfvars` file, for example:
+
+```
+number_of_k8s_masters = "1"
+number_of_k8s_masters_no_floating_ip = "2"
+number_of_k8s_nodes_no_floating_ip = "1"
+number_of_k8s_nodes = "0"
+```
+This will provision one VM as master using a floating ip, two additional masters using no floating ips (these will only have private ips inside your tenancy), and one VM as node, again without a floating ip.
+
+Additionally, the terraform-based installation now supports provisioning of a GlusterFS shared file system based on a separate set of VMs, running either a Debian or RedHat based set of VMs. To enable this, add the following variables to your `my-terraform-vars.tfvars`:
+
+```
+# Flavour depends on your openstack installation; you can list available flavours with `nova flavor-list`
+flavor_gfs_node = "af659280-5b8a-42b5-8865-a703775911da"
+# This is the name of an image already available in your openstack installation.
+image_gfs = "Ubuntu 15.10"
+number_of_gfs_nodes_no_floating_ip = "3"
+# This is the size of the non-ephemeral volumes to be attached to store the GlusterFS bricks.
+gfs_volume_size_in_gb = "50"
+# The user needed for the image chosen for GlusterFS.
+ssh_user_gfs = "ubuntu"
+```
+
+If these variables are provided, this will give rise to a new ansible group called `gfs-cluster`, for which we have added ansible roles to execute in the ansible provisioning step. If you are using Container Linux by CoreOS, these GlusterFS VMs necessarily need to be either Debian or RedHat based VMs; Container Linux by CoreOS cannot serve GlusterFS, but can connect to it through binaries available on hyperkube v1.4.3_coreos.0 or higher.
+
+
 # Provision a Kubernetes Cluster on OpenStack
 
+If not using a tfvars file for your setup, then execute:
 ```
 terraform apply -state=contrib/terraform/openstack/terraform.tfstate contrib/terraform/openstack
 openstack_compute_secgroup_v2.k8s_master: Creating...
@@ -96,6 +121,13 @@ use the `terraform show` command.
 State path: contrib/terraform/openstack/terraform.tfstate
 ```
 
+Alternatively, if you wrote your terraform variables in a file `my-terraform-vars.tfvars`, your command would look like:
+```
+terraform apply -state=contrib/terraform/openstack/terraform.tfstate -var-file=my-terraform-vars.tfvars contrib/terraform/openstack
+```
+
+If you choose to add masters or nodes without floating ips (only internal ips on your OpenStack tenancy), this script will also create a file `contrib/terraform/openstack/k8s-cluster.yml` with an ssh command that lets ansible access your machines by tunneling through the first floating ip used. If you want to handle the ssh tunneling to these machines manually, please delete or move that file. If you want to use it, just leave it there, as ansible will pick it up automatically.
+
 Make sure you can connect to the hosts:
 
 ```
@@ -114,6 +146,8 @@ example-k8s-master-1 | SUCCESS => {
 }
 ```
 
+If you are deploying a system that needs bootstrapping, like Container Linux by CoreOS, these might have a state `FAILED` due to Container Linux by CoreOS not having python. As long as the state is not `UNREACHABLE`, this is fine.
+
 If it fails, try to connect manually via SSH; it could be something as simple as a stale host key.
 
 Deploy kubernetes:
1 contrib/terraform/openstack/ansible_bastion_template.txt Normal file
@@ -0,0 +1 @@
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q USER@BASTION_ADDRESS"'
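The local-exec provisioners later in this change substitute `USER` and `BASTION_ADDRESS` in this template with `sed`; the generated `group_vars/no-floating.yml` would then look roughly like the following, where the user and floating ip are placeholders:

```
# rendered group_vars/no-floating.yml -- user and address are illustrative
ansible_ssh_common_args: '-o ProxyCommand="ssh -o StrictHostKeyChecking=no -W %h:%p -q ubuntu@203.0.113.10"'
```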
1 contrib/terraform/openstack/group_vars Symbolic link
@@ -0,0 +1 @@
../../../inventory/group_vars
@@ -1,136 +0,0 @@
# Directory where the binaries will be installed
bin_dir: /usr/local/bin

# Where the binaries will be downloaded.
# Note: ensure that you've enough disk space (about 1G)
local_release_dir: "/tmp/releases"

# Uncomment this line for CoreOS only.
# Directory where python binary is installed
# ansible_python_interpreter: "/opt/bin/python"

# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changeable...
kube_cert_group: kube-cert

# Cluster Loglevel configuration
kube_log_level: 2

# Users to create for basic auth in Kubernetes API via HTTP
kube_api_pwd: "changeme"
kube_users:
  kube:
    pass: "{{kube_api_pwd}}"
    role: admin
  root:
    pass: "changeme"
    role: admin

# Kubernetes cluster name, also will be used as DNS domain
cluster_name: cluster.local

# For some environments, each node has a publicly accessible
# address and an address it should bind services to. These are
# really inventory level variables, but described here for consistency.
#
# When advertising access, the access_ip will be used, but will defer to
# ip and then the default ansible ip when unspecified.
#
# When binding to restrict access, the ip variable will be used, but will
# defer to the default ansible ip when unspecified.
#
# The ip variable is used for specific address binding, e.g. listen address
# for etcd. This is used to help with environments like Vagrant or multi-nic
# systems where one address should be preferred over another.
# ip: 10.2.2.2
#
# The access_ip variable is used to define how other nodes should access
# the node. This is used in flannel to allow other flannel nodes to see
# this node for example. The access_ip is really useful in AWS and Google
# environments where the nodes are accessed remotely by the "public" ip,
# but don't know about that address themselves.
# access_ip: 1.1.1.1

# Choose network plugin (calico, weave or flannel)
kube_network_plugin: flannel

# Kubernetes internal network for services, unused block of space.
kube_service_addresses: 10.233.0.0/18

# internal network. When used, it will assign IP
# addresses from this range to individual pods.
# This network must be unused in your network infrastructure!
kube_pods_subnet: 10.233.64.0/18

# internal network total size (optional). This is the prefix of the
# entire network. Must be unused in your environment.
# kube_network_prefix: 18

# internal network node size allocation (optional). This is the size allocated
# to each node on your network. With these defaults you should have
# room for 4096 nodes with 254 pods per node.
kube_network_node_prefix: 24

# With calico it is possible to distribute routes with border routers of the datacenter.
peer_with_router: false
# Warning: enabling router peering will disable calico's default behavior ('node mesh').
# The subnets of each node will be distributed by the datacenter router.

# The port the API Server will be listening on.
kube_apiserver_ip: "{{ kube_service_addresses|ipaddr('net')|ipaddr(1)|ipaddr('address') }}"
kube_apiserver_port: 443 # (https)
kube_apiserver_insecure_port: 8080 # (http)

# Internal DNS configuration.
# Kubernetes can create and maintain its own DNS server to resolve service names
# into appropriate IP addresses. It's highly advisable to run such a DNS server,
# as it greatly simplifies configuration of your applications - you can use
# service names instead of magic environment variables.
# You still must manually configure all your containers to use this DNS server,
# Kubernetes won't do this for you (yet).

# Upstream dns servers used by dnsmasq
upstream_dns_servers:
  - 8.8.8.8
  - 8.8.4.4
#
# # Use dns server : https://github.com/ansibl8s/k8s-skydns/blob/master/skydns-README.md
dns_setup: true
dns_domain: "{{ cluster_name }}"
#
# # Ip address of the kubernetes skydns service
skydns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(3)|ipaddr('address') }}"
dns_server: "{{ kube_service_addresses|ipaddr('net')|ipaddr(2)|ipaddr('address') }}"

# There are some changes specific to the cloud providers,
# for instance we need to encapsulate packets with some network plugins.
# If set, the possible values are either 'gce', 'aws' or 'openstack'.
# When openstack is used, make sure to source in the openstack credentials
# like you would do when using nova-client before starting the playbook.
# cloud_provider:

# For multi masters architecture:
# kube-proxy doesn't support multiple apiservers for the time being, so you'll need to configure your own loadbalancer.
# This domain name will be inserted into the /etc/hosts file of all servers.
# configuration example with haproxy:
# listen kubernetes-apiserver-https
#   bind 10.99.0.21:8383
#   option ssl-hello-chk
#   mode tcp
#   timeout client 3h
#   timeout server 3h
#   server master1 10.99.0.26:443
#   server master2 10.99.0.27:443
#   balance roundrobin
# apiserver_loadbalancer_domain_name: "lb-apiserver.kubernetes.local"

## Set these proxy values in order to update docker daemon to use proxies
# http_proxy: ""
# https_proxy: ""
# no_proxy: ""

## A string of extra options to pass to the docker daemon.
## This string should be exactly as you wish it to appear.
## An obvious use case is allowing insecure-registry access
## to self hosted registries like so:
docker_options: "--insecure-registry={{ kube_service_addresses }}"
@@ -68,7 +68,29 @@ resource "openstack_compute_instance_v2" "k8s_master" {
     floating_ip = "${element(openstack_networking_floatingip_v2.k8s_master.*.address, count.index)}"
     metadata = {
         ssh_user = "${var.ssh_user}"
-        kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster"
+        kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault"
     }
 }
+
+resource "openstack_compute_instance_v2" "k8s_master_no_floating_ip" {
+    name = "${var.cluster_name}-k8s-master-nf-${count.index+1}"
+    count = "${var.number_of_k8s_masters_no_floating_ip}"
+    image_name = "${var.image}"
+    flavor_id = "${var.flavor_k8s_master}"
+    key_pair = "${openstack_compute_keypair_v2.k8s.name}"
+    network {
+        name = "${var.network_name}"
+    }
+    security_groups = [ "${openstack_compute_secgroup_v2.k8s_master.name}",
+                        "${openstack_compute_secgroup_v2.k8s.name}" ]
+    metadata = {
+        ssh_user = "${var.ssh_user}"
+        kubespray_groups = "etcd,kube-master,kube-node,k8s-cluster,vault,no-floating"
+    }
+    provisioner "local-exec" {
+        command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
+    }
+}
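The local-exec provisioner above runs on the machine driving Terraform and rewrites the bastion template into a group_vars file. A rough Python sketch of that sed pipeline (ssh_user and bastion_address are illustrative stand-ins for the Terraform interpolations; the template is assumed to contain literal USER and BASTION_ADDRESS placeholders):

    # Rough Python equivalent of the sed pipeline in the local-exec provisioner.
    ssh_user = "ubuntu"               # illustrative stand-in for ${var.ssh_user}
    bastion_address = "203.0.113.10"  # illustrative stand-in for the first master's floating IP

    with open("contrib/terraform/openstack/ansible_bastion_template.txt") as f:
        rendered = f.read().replace("USER", ssh_user).replace("BASTION_ADDRESS", bastion_address)

    with open("contrib/terraform/openstack/group_vars/no-floating.yml", "w") as f:
        f.write(rendered)
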
@@ -85,10 +107,61 @@ resource "openstack_compute_instance_v2" "k8s_node" {
     floating_ip = "${element(openstack_networking_floatingip_v2.k8s_node.*.address, count.index)}"
     metadata = {
         ssh_user = "${var.ssh_user}"
-        kubespray_groups = "kube-node,k8s-cluster"
+        kubespray_groups = "kube-node,k8s-cluster,vault"
     }
 }
+
+resource "openstack_compute_instance_v2" "k8s_node_no_floating_ip" {
+    name = "${var.cluster_name}-k8s-node-nf-${count.index+1}"
+    count = "${var.number_of_k8s_nodes_no_floating_ip}"
+    image_name = "${var.image}"
+    flavor_id = "${var.flavor_k8s_node}"
+    key_pair = "${openstack_compute_keypair_v2.k8s.name}"
+    network {
+        name = "${var.network_name}"
+    }
+    security_groups = [ "${openstack_compute_secgroup_v2.k8s.name}" ]
+    metadata = {
+        ssh_user = "${var.ssh_user}"
+        kubespray_groups = "kube-node,k8s-cluster,vault,no-floating"
+    }
+    provisioner "local-exec" {
+        command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/no-floating.yml"
+    }
+}
+
+resource "openstack_blockstorage_volume_v2" "glusterfs_volume" {
+    name = "${var.cluster_name}-gfs-nephe-vol-${count.index+1}"
+    count = "${var.number_of_gfs_nodes_no_floating_ip}"
+    description = "Non-ephemeral volume for GlusterFS"
+    size = "${var.gfs_volume_size_in_gb}"
+}
+
+resource "openstack_compute_instance_v2" "glusterfs_node_no_floating_ip" {
+    name = "${var.cluster_name}-gfs-node-nf-${count.index+1}"
+    count = "${var.number_of_gfs_nodes_no_floating_ip}"
+    image_name = "${var.image_gfs}"
+    flavor_id = "${var.flavor_gfs_node}"
+    key_pair = "${openstack_compute_keypair_v2.k8s.name}"
+    network {
+        name = "${var.network_name}"
+    }
+    security_groups = [ "${openstack_compute_secgroup_v2.k8s.name}" ]
+    metadata = {
+        ssh_user = "${var.ssh_user_gfs}"
+        kubespray_groups = "gfs-cluster,network-storage"
+    }
+    volume {
+        volume_id = "${element(openstack_blockstorage_volume_v2.glusterfs_volume.*.id, count.index)}"
+    }
+    provisioner "local-exec" {
+        command = "sed s/USER/${var.ssh_user}/ contrib/terraform/openstack/ansible_bastion_template.txt | sed s/BASTION_ADDRESS/${element(openstack_networking_floatingip_v2.k8s_master.*.address, 0)}/ > contrib/terraform/openstack/group_vars/gfs-cluster.yml"
+    }
+}
 
 #output "msg" {
 # value = "Your hosts are ready to go!\nYour ssh hosts are: ${join(", ", openstack_networking_floatingip_v2.k8s_master.*.address )}"
 #}
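In the glusterfs_node_no_floating_ip resource above, each instance picks its volume with element(). Terraform's element() indexes into a list and wraps around its length, which this small Python sketch (with made-up volume IDs) makes explicit:

    # Terraform's element(list, index) behaves like list[index % len(list)],
    # so instance N attaches the glusterfs volume created with the same index.
    def element(values, index):
        return values[index % len(values)]

    volume_ids = ["vol-0", "vol-1", "vol-2"]   # made-up stand-ins
    assert element(volume_ids, 1) == "vol-1"
    assert element(volume_ids, 4) == "vol-1"   # wraps past the end of the list
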
@@ -1,238 +0,0 @@
-{
-  "version": 1,
-  "serial": 17,
-  "modules": [
-    {
-      "path": [
-        "root"
-      ],
-      "outputs": {},
-      "resources": {
-        "openstack_compute_instance_v2.k8s_master.0": {
-          "type": "openstack_compute_instance_v2",
-          "depends_on": [
-            "openstack_compute_keypair_v2.k8s",
-            "openstack_compute_secgroup_v2.k8s",
-            "openstack_compute_secgroup_v2.k8s_master",
-            "openstack_networking_floatingip_v2.k8s_master"
-          ],
-          "primary": {
-            "id": "f4a44f6e-33ff-4e35-b593-34f3dfd80dc9",
-            "attributes": {
-              "access_ip_v4": "173.247.105.12",
-              "access_ip_v6": "",
-              "flavor_id": "3",
-              "flavor_name": "m1.medium",
-              "floating_ip": "173.247.105.12",
-              "id": "f4a44f6e-33ff-4e35-b593-34f3dfd80dc9",
-              "image_id": "1525c3f3-1224-4958-bd07-da9feaedf18b",
-              "image_name": "ubuntu-14.04",
-              "key_pair": "kubernetes-example",
-              "metadata.#": "2",
-              "metadata.kubespray_groups": "etcd,kube-master,kube-node,k8s-cluster",
-              "metadata.ssh_user": "ubuntu",
-              "name": "example-k8s-master-1",
-              "network.#": "1",
-              "network.0.access_network": "false",
-              "network.0.fixed_ip_v4": "10.230.7.86",
-              "network.0.fixed_ip_v6": "",
-              "network.0.floating_ip": "173.247.105.12",
-              "network.0.mac": "fa:16:3e:fb:82:1d",
-              "network.0.name": "internal",
-              "network.0.port": "",
-              "network.0.uuid": "ba0fdd03-72b5-41eb-bb67-fef437fd6cb4",
-              "security_groups.#": "2",
-              "security_groups.2779334175": "example-k8s",
-              "security_groups.3772290257": "example-k8s-master",
-              "volume.#": "0"
-            }
-          }
-        },
-        "openstack_compute_instance_v2.k8s_master.1": {
-          "type": "openstack_compute_instance_v2",
-          "depends_on": [
-            "openstack_compute_keypair_v2.k8s",
-            "openstack_compute_secgroup_v2.k8s",
-            "openstack_compute_secgroup_v2.k8s_master",
-            "openstack_networking_floatingip_v2.k8s_master"
-          ],
-          "primary": {
-            "id": "cbb565fe-a3b6-44ff-8f81-8ec29704d11b",
-            "attributes": {
-              "access_ip_v4": "173.247.105.70",
-              "access_ip_v6": "",
-              "flavor_id": "3",
-              "flavor_name": "m1.medium",
-              "floating_ip": "173.247.105.70",
-              "id": "cbb565fe-a3b6-44ff-8f81-8ec29704d11b",
-              "image_id": "1525c3f3-1224-4958-bd07-da9feaedf18b",
-              "image_name": "ubuntu-14.04",
-              "key_pair": "kubernetes-example",
-              "metadata.#": "2",
-              "metadata.kubespray_groups": "etcd,kube-master,kube-node,k8s-cluster",
-              "metadata.ssh_user": "ubuntu",
-              "name": "example-k8s-master-2",
-              "network.#": "1",
-              "network.0.access_network": "false",
-              "network.0.fixed_ip_v4": "10.230.7.85",
-              "network.0.fixed_ip_v6": "",
-              "network.0.floating_ip": "173.247.105.70",
-              "network.0.mac": "fa:16:3e:33:98:e6",
-              "network.0.name": "internal",
-              "network.0.port": "",
-              "network.0.uuid": "ba0fdd03-72b5-41eb-bb67-fef437fd6cb4",
-              "security_groups.#": "2",
-              "security_groups.2779334175": "example-k8s",
-              "security_groups.3772290257": "example-k8s-master",
-              "volume.#": "0"
-            }
-          }
-        },
-        "openstack_compute_instance_v2.k8s_node": {
-          "type": "openstack_compute_instance_v2",
-          "depends_on": [
-            "openstack_compute_keypair_v2.k8s",
-            "openstack_compute_secgroup_v2.k8s",
-            "openstack_networking_floatingip_v2.k8s_node"
-          ],
-          "primary": {
-            "id": "39deed7e-8307-4b62-b56c-ce2b405a03fa",
-            "attributes": {
-              "access_ip_v4": "173.247.105.76",
-              "access_ip_v6": "",
-              "flavor_id": "3",
-              "flavor_name": "m1.medium",
-              "floating_ip": "173.247.105.76",
-              "id": "39deed7e-8307-4b62-b56c-ce2b405a03fa",
-              "image_id": "1525c3f3-1224-4958-bd07-da9feaedf18b",
-              "image_name": "ubuntu-14.04",
-              "key_pair": "kubernetes-example",
-              "metadata.#": "2",
-              "metadata.kubespray_groups": "kube-node,k8s-cluster",
-              "metadata.ssh_user": "ubuntu",
-              "name": "example-k8s-node-1",
-              "network.#": "1",
-              "network.0.access_network": "false",
-              "network.0.fixed_ip_v4": "10.230.7.84",
-              "network.0.fixed_ip_v6": "",
-              "network.0.floating_ip": "173.247.105.76",
-              "network.0.mac": "fa:16:3e:53:57:bc",
-              "network.0.name": "internal",
-              "network.0.port": "",
-              "network.0.uuid": "ba0fdd03-72b5-41eb-bb67-fef437fd6cb4",
-              "security_groups.#": "1",
-              "security_groups.2779334175": "example-k8s",
-              "volume.#": "0"
-            }
-          }
-        },
-        "openstack_compute_keypair_v2.k8s": {
-          "type": "openstack_compute_keypair_v2",
-          "primary": {
-            "id": "kubernetes-example",
-            "attributes": {
-              "id": "kubernetes-example",
-              "name": "kubernetes-example",
-              "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9nU6RPYCabjLH1LvJfpp9L8r8q5RZ6niS92zD95xpm2b2obVydWe0tCSFdmULBuvT8Q8YQ4qOG2g/oJlsGOsia+4CQjYEUV9CgTH9H5HK3vUOwtO5g2eFnYKSmI/4znHa0WYpQFnQK2kSSeCs2beTlJhc8vjfN/2HHmuny6SxNSbnCk/nZdwamxEONIVdjlm3CSBlq4PChT/D/uUqm/nOm0Zqdk9ZlTBkucsjiOCJeEzg4HioKmIH8ewqsKuS7kMADHPH98JMdBhTKbYbLrxTC/RfiaON58WJpmdOA935TT5Td5aVQZoqe/i/5yFRp5fMG239jtfbM0Igu44TEIib pczarkowski@Pauls-MacBook-Pro.local\n"
-            }
-          }
-        },
-        "openstack_compute_secgroup_v2.k8s": {
-          "type": "openstack_compute_secgroup_v2",
-          "primary": {
-            "id": "418394e2-b4be-4953-b7a3-b309bf28fbdb",
-            "attributes": {
-              "description": "example - Kubernetes",
-              "id": "418394e2-b4be-4953-b7a3-b309bf28fbdb",
-              "name": "example-k8s",
-              "rule.#": "5",
-              "rule.112275015.cidr": "",
-              "rule.112275015.from_group_id": "",
-              "rule.112275015.from_port": "1",
-              "rule.112275015.id": "597170c9-b35a-45c0-8717-652a342f3fd6",
-              "rule.112275015.ip_protocol": "tcp",
-              "rule.112275015.self": "true",
-              "rule.112275015.to_port": "65535",
-              "rule.2180185248.cidr": "0.0.0.0/0",
-              "rule.2180185248.from_group_id": "",
-              "rule.2180185248.from_port": "-1",
-              "rule.2180185248.id": "ffdcdd5e-f18b-4537-b502-8849affdfed9",
-              "rule.2180185248.ip_protocol": "icmp",
-              "rule.2180185248.self": "false",
-              "rule.2180185248.to_port": "-1",
-              "rule.3267409695.cidr": "",
-              "rule.3267409695.from_group_id": "",
-              "rule.3267409695.from_port": "-1",
-              "rule.3267409695.id": "4f91d9ca-940c-4f4d-9ce1-024cbd7d9c54",
-              "rule.3267409695.ip_protocol": "icmp",
-              "rule.3267409695.self": "true",
-              "rule.3267409695.to_port": "-1",
-              "rule.635693822.cidr": "",
-              "rule.635693822.from_group_id": "",
-              "rule.635693822.from_port": "1",
-              "rule.635693822.id": "c6816e5b-a1a4-4071-acce-d09b92d14d49",
-              "rule.635693822.ip_protocol": "udp",
-              "rule.635693822.self": "true",
-              "rule.635693822.to_port": "65535",
-              "rule.836640770.cidr": "0.0.0.0/0",
-              "rule.836640770.from_group_id": "",
-              "rule.836640770.from_port": "22",
-              "rule.836640770.id": "8845acba-636b-4c23-b9e2-5bff76d9008d",
-              "rule.836640770.ip_protocol": "tcp",
-              "rule.836640770.self": "false",
-              "rule.836640770.to_port": "22"
-            }
-          }
-        },
-        "openstack_compute_secgroup_v2.k8s_master": {
-          "type": "openstack_compute_secgroup_v2",
-          "primary": {
-            "id": "c74aed25-6161-46c4-a488-dfc7f49a228e",
-            "attributes": {
-              "description": "example - Kubernetes Master",
-              "id": "c74aed25-6161-46c4-a488-dfc7f49a228e",
-              "name": "example-k8s-master",
-              "rule.#": "0"
-            }
-          }
-        },
-        "openstack_networking_floatingip_v2.k8s_master.0": {
-          "type": "openstack_networking_floatingip_v2",
-          "primary": {
-            "id": "2a320c67-214d-4631-a840-2de82505ed3f",
-            "attributes": {
-              "address": "173.247.105.12",
-              "id": "2a320c67-214d-4631-a840-2de82505ed3f",
-              "pool": "external",
-              "port_id": ""
-            }
-          }
-        },
-        "openstack_networking_floatingip_v2.k8s_master.1": {
-          "type": "openstack_networking_floatingip_v2",
-          "primary": {
-            "id": "3adbfc13-e7ae-4bcf-99d3-3ba9db056e1f",
-            "attributes": {
-              "address": "173.247.105.70",
-              "id": "3adbfc13-e7ae-4bcf-99d3-3ba9db056e1f",
-              "pool": "external",
-              "port_id": ""
-            }
-          }
-        },
-        "openstack_networking_floatingip_v2.k8s_node": {
-          "type": "openstack_networking_floatingip_v2",
-          "primary": {
-            "id": "a3f77aa6-5c3a-4edf-b97e-ee211dfa81e1",
-            "attributes": {
-              "address": "173.247.105.76",
-              "id": "a3f77aa6-5c3a-4edf-b97e-ee211dfa81e1",
-              "pool": "external",
-              "port_id": ""
-            }
-          }
-        }
-      }
-    }
-  ]
-}
@@ -1,13 +0,0 @@
-{
-  "version": 1,
-  "serial": 16,
-  "modules": [
-    {
-      "path": [
-        "root"
-      ],
-      "outputs": {},
-      "resources": {}
-    }
-  ]
-}
@@ -6,10 +6,26 @@ variable "number_of_k8s_masters" {
     default = 2
 }
 
+variable "number_of_k8s_masters_no_floating_ip" {
+    default = 2
+}
+
 variable "number_of_k8s_nodes" {
     default = 1
 }
 
+variable "number_of_k8s_nodes_no_floating_ip" {
+    default = 1
+}
+
+variable "number_of_gfs_nodes_no_floating_ip" {
+    default = 0
+}
+
+variable "gfs_volume_size_in_gb" {
+    default = 75
+}
+
 variable "public_key_path" {
     description = "The path of the ssh pub key"
     default = "~/.ssh/id_rsa.pub"

@@ -20,11 +36,21 @@ variable "image" {
     default = "ubuntu-14.04"
 }
 
+variable "image_gfs" {
+    description = "Glance image to use for GlusterFS"
+    default = "ubuntu-16.04"
+}
+
 variable "ssh_user" {
     description = "used to fill out tags for ansible inventory"
     default = "ubuntu"
 }
 
+variable "ssh_user_gfs" {
+    description = "used to fill out tags for ansible inventory"
+    default = "ubuntu"
+}
+
 variable "flavor_k8s_master" {
     default = 3
 }

@@ -33,6 +59,9 @@ variable "flavor_k8s_node" {
     default = 3
 }
 
+variable "flavor_gfs_node" {
+    default = 3
+}
+
 variable "network_name" {
     description = "name of the internal network to use"
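Note that number_of_gfs_nodes_no_floating_ip defaults to 0, so the GlusterFS machines and volumes are opt-in: nothing is created unless the count is raised, for example with `terraform apply -var "number_of_gfs_nodes_no_floating_ip=2"` (an illustrative invocation, not one shown in this diff).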
@@ -309,6 +309,7 @@ def openstack_host(resource, module_name):
     attrs = {
         'access_ip_v4': raw_attrs['access_ip_v4'],
         'access_ip_v6': raw_attrs['access_ip_v6'],
+        'ip': raw_attrs['network.0.fixed_ip_v4'],
         'flavor': parse_dict(raw_attrs, 'flavor',
                              sep='_'),
         'id': raw_attrs['id'],

@@ -346,6 +347,15 @@ def openstack_host(resource, module_name):
     if 'metadata.ssh_user' in raw_attrs:
         attrs['ansible_ssh_user'] = raw_attrs['metadata.ssh_user']
 
+    if 'volume.#' in raw_attrs.keys() and int(raw_attrs['volume.#']) > 0:
+        device_index = 1
+        for key, value in raw_attrs.items():
+            match = re.search("^volume.*.device$", key)
+            if match:
+                attrs['disk_volume_device_'+str(device_index)] = value
+                device_index += 1
+
     # attrs specific to Mantl
     attrs.update({
         'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
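To see what the volume loop added above produces, here is a small self-contained run on made-up tfstate attributes (the unescaped dots in the regex match any character, which is harmless for keys of this shape):

    # Illustration of the volume-device loop above, on made-up attributes.
    import re

    raw_attrs = {
        "volume.#": "1",
        "volume.0.device": "/dev/vdb",
        "volume.0.id": "vol-123",  # made-up id
    }
    attrs = {}
    if "volume.#" in raw_attrs.keys() and int(raw_attrs["volume.#"]) > 0:
        device_index = 1
        for key, value in raw_attrs.items():
            if re.search("^volume.*.device$", key):
                attrs["disk_volume_device_" + str(device_index)] = value
                device_index += 1

    print(attrs)  # {'disk_volume_device_1': '/dev/vdb'}
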
Some files were not shown because too many files have changed in this diff.