mirror of
https://github.com/jpetazzo/container.training.git
synced 2026-02-15 01:59:57 +00:00
Compare commits
677 Commits
paris
...
qconuk2019
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
787ed190b0 | ||
|
|
745ebefc3d | ||
|
|
53907d82b4 | ||
|
|
b8f11b3c72 | ||
|
|
9b4413f332 | ||
|
|
e5a7e15ef8 | ||
|
|
52be1aa464 | ||
|
|
6a644e53e0 | ||
|
|
ff91c26976 | ||
|
|
ff40b79775 | ||
|
|
3f8ec37225 | ||
|
|
1dfdec413a | ||
|
|
cf3fae6db1 | ||
|
|
c9b85650cb | ||
|
|
964057cd52 | ||
|
|
da13946ba0 | ||
|
|
f6d154cb84 | ||
|
|
1657503da1 | ||
|
|
af8441912e | ||
|
|
e16c1d982a | ||
|
|
1fb0ec7580 | ||
|
|
ad80914000 | ||
|
|
d877844a5e | ||
|
|
195c08cb91 | ||
|
|
8a3dad3206 | ||
|
|
4f59e293ee | ||
|
|
8753279603 | ||
|
|
d84c585fdc | ||
|
|
b8f8ffa07d | ||
|
|
4f2ecb0f4a | ||
|
|
662b3a47a0 | ||
|
|
8325dcc6a0 | ||
|
|
42c1a93d5f | ||
|
|
8d1737c2b3 | ||
|
|
8045215c63 | ||
|
|
ad20e1efe6 | ||
|
|
f0f3d70521 | ||
|
|
53cf52f05c | ||
|
|
e280cec60f | ||
|
|
c8047897e7 | ||
|
|
cc071b79c3 | ||
|
|
869f46060a | ||
|
|
258c134421 | ||
|
|
c6d9edbf12 | ||
|
|
5fc62e8fd7 | ||
|
|
f207adfe13 | ||
|
|
8c2107fba9 | ||
|
|
d4096e9c21 | ||
|
|
5c89738ab6 | ||
|
|
893a84feb7 | ||
|
|
f807964416 | ||
|
|
2ea9cbb00f | ||
|
|
8cd9a314d3 | ||
|
|
ede085cf48 | ||
|
|
bc349d6c4d | ||
|
|
80d6b57697 | ||
|
|
5c2599a2b9 | ||
|
|
a6f6ff161d | ||
|
|
6aaa8fab75 | ||
|
|
01042101a2 | ||
|
|
5afb37a3b9 | ||
|
|
995ea626db | ||
|
|
a1adbb66c8 | ||
|
|
3212561c89 | ||
|
|
003a232b79 | ||
|
|
2770da68cd | ||
|
|
c502d019ff | ||
|
|
a07e50ecf8 | ||
|
|
46c6866ce9 | ||
|
|
fe95318108 | ||
|
|
65232f93ba | ||
|
|
9fa7b958dc | ||
|
|
a95e5c960e | ||
|
|
5b87162e95 | ||
|
|
8c4914294e | ||
|
|
7b9b9f527d | ||
|
|
3c7f39747c | ||
|
|
be67a742ee | ||
|
|
40cd934118 | ||
|
|
556db65251 | ||
|
|
ff781a3065 | ||
|
|
8348d750df | ||
|
|
9afa0acbf9 | ||
|
|
cb624755e4 | ||
|
|
523ca55831 | ||
|
|
f0b48935fa | ||
|
|
abcc47b563 | ||
|
|
33e1bfd8be | ||
|
|
2efc29991e | ||
|
|
11387f1330 | ||
|
|
fe93dccbac | ||
|
|
5fad84a7cf | ||
|
|
22dd6b4e70 | ||
|
|
a3594e7e1e | ||
|
|
7f74e5ce32 | ||
|
|
9e051abb32 | ||
|
|
3ebcfd142b | ||
|
|
6c5d049c4c | ||
|
|
072ba44cba | ||
|
|
bc8a9dc4e7 | ||
|
|
b1ba881eee | ||
|
|
337a5d94ed | ||
|
|
43acccc0af | ||
|
|
4a447c7bf5 | ||
|
|
b9de73d0fd | ||
|
|
6b9b83a7ae | ||
|
|
3f7675be04 | ||
|
|
b4bb9e5958 | ||
|
|
9a6160ba1f | ||
|
|
1d243b72ec | ||
|
|
c5c1ccaa25 | ||
|
|
b68afe502b | ||
|
|
d18cacab4c | ||
|
|
2faca4a507 | ||
|
|
d797ec62ed | ||
|
|
a475d63789 | ||
|
|
dd3f2d054f | ||
|
|
73594fd505 | ||
|
|
16a1b5c6b5 | ||
|
|
ff7a257844 | ||
|
|
77046a8ddf | ||
|
|
3ca696f059 | ||
|
|
305db76340 | ||
|
|
b1672704e8 | ||
|
|
c058f67a1f | ||
|
|
ab56c63901 | ||
|
|
a5341f9403 | ||
|
|
b2bdac3384 | ||
|
|
a2531a0c63 | ||
|
|
84e2b90375 | ||
|
|
9639dfb9cc | ||
|
|
8722de6da2 | ||
|
|
f2f87e52b0 | ||
|
|
56ad2845e7 | ||
|
|
f23272d154 | ||
|
|
86e35480a4 | ||
|
|
1020a8ff86 | ||
|
|
20b1079a22 | ||
|
|
f090172413 | ||
|
|
e4251cfa8f | ||
|
|
b6dd55b21c | ||
|
|
53d1a68765 | ||
|
|
156ce67413 | ||
|
|
e372850b06 | ||
|
|
f543b54426 | ||
|
|
35614714c8 | ||
|
|
100c6b46cf | ||
|
|
36ccaf7ea4 | ||
|
|
4a655db1ba | ||
|
|
2a80586504 | ||
|
|
0a942118c1 | ||
|
|
2f1ad67fb3 | ||
|
|
4b0ac6d0e3 | ||
|
|
ac273da46c | ||
|
|
7a6594c96d | ||
|
|
657b7465c6 | ||
|
|
08059a845f | ||
|
|
24e2042c9d | ||
|
|
9771f054ea | ||
|
|
5db4e2adfa | ||
|
|
bde5db49a7 | ||
|
|
7c6b2730f5 | ||
|
|
7f6a15fbb7 | ||
|
|
d97b1e5944 | ||
|
|
1519196c95 | ||
|
|
f8629a2689 | ||
|
|
fadecd52ee | ||
|
|
524d6e4fc1 | ||
|
|
51f5f5393c | ||
|
|
f574afa9d2 | ||
|
|
4f49015a6e | ||
|
|
f25d12b53d | ||
|
|
78259c3eb6 | ||
|
|
adc922e4cd | ||
|
|
f68194227c | ||
|
|
29a3ce0ba2 | ||
|
|
e5fe27dd54 | ||
|
|
6016ffe7d7 | ||
|
|
7c94a6f689 | ||
|
|
5953ffe10b | ||
|
|
3016019560 | ||
|
|
0d5da73c74 | ||
|
|
91c835fcb4 | ||
|
|
d01ae0ff39 | ||
|
|
63b85da4f6 | ||
|
|
2406e72210 | ||
|
|
32e1edc2a2 | ||
|
|
84225e982f | ||
|
|
e76a06e942 | ||
|
|
0519682c30 | ||
|
|
91f7a81964 | ||
|
|
a66fcaf04c | ||
|
|
9a0649e671 | ||
|
|
d23ad0cd8f | ||
|
|
63755c1cd3 | ||
|
|
149cf79615 | ||
|
|
a627128570 | ||
|
|
91e3078d2e | ||
|
|
31dd943141 | ||
|
|
3866701475 | ||
|
|
521f8e9889 | ||
|
|
49c3fdd3b2 | ||
|
|
4bb6a49ee0 | ||
|
|
db8e8377ac | ||
|
|
510a37be44 | ||
|
|
230bd73597 | ||
|
|
7217c0ee1d | ||
|
|
77d455d894 | ||
|
|
4f9c8275d9 | ||
|
|
f11aae2514 | ||
|
|
f1e9efc38c | ||
|
|
975cc4f7df | ||
|
|
01243280a2 | ||
|
|
e652c3639d | ||
|
|
1e0954d9b4 | ||
|
|
bb21f9bbc9 | ||
|
|
25466e7950 | ||
|
|
78026ff9b8 | ||
|
|
60c7ef4e53 | ||
|
|
55952934ed | ||
|
|
f9d31f4c30 | ||
|
|
ec037e422b | ||
|
|
73f66f25d8 | ||
|
|
28174b6cf9 | ||
|
|
a80c095a07 | ||
|
|
374574717d | ||
|
|
efce5d1ad4 | ||
|
|
4eec91a9e6 | ||
|
|
57166f33aa | ||
|
|
f1ebb1f0fb | ||
|
|
8182e4df96 | ||
|
|
6f3580820c | ||
|
|
7b7fd2a4b4 | ||
|
|
f74addd0ca | ||
|
|
21ba3b7713 | ||
|
|
4eca15f822 | ||
|
|
4205f619cf | ||
|
|
c3dff823ef | ||
|
|
39876d1388 | ||
|
|
7e34aa0287 | ||
|
|
3bdafed38e | ||
|
|
3d438ff304 | ||
|
|
bcd1f37085 | ||
|
|
ba928e59fc | ||
|
|
62c01ef7d6 | ||
|
|
a71347e328 | ||
|
|
f235cfa13c | ||
|
|
45b397682b | ||
|
|
858ad02973 | ||
|
|
defeef093d | ||
|
|
b45615e2c3 | ||
|
|
b158babb7f | ||
|
|
59b7386b91 | ||
|
|
c05bcd23d9 | ||
|
|
3cb91855c8 | ||
|
|
dc0850ef3e | ||
|
|
ffdd7fda45 | ||
|
|
83b2133573 | ||
|
|
d04856f964 | ||
|
|
8373d5302f | ||
|
|
7d7cb0eadb | ||
|
|
c00c87f8f2 | ||
|
|
f599462ad7 | ||
|
|
018282f392 | ||
|
|
23b3c1c05a | ||
|
|
62686d0b7a | ||
|
|
54288502a2 | ||
|
|
efc045e40b | ||
|
|
6e9b16511f | ||
|
|
81b6e60a8c | ||
|
|
5baaf7e00a | ||
|
|
d4d460397f | ||
|
|
f66b6b2ee3 | ||
|
|
fb7f7fd8c8 | ||
|
|
dc98fa21a9 | ||
|
|
6b662d3e4c | ||
|
|
7069682c8e | ||
|
|
3b1d5b93a8 | ||
|
|
611fe55e90 | ||
|
|
481272ac22 | ||
|
|
9069e2d7db | ||
|
|
1144c16a4c | ||
|
|
9b2846633c | ||
|
|
db88c0a5bf | ||
|
|
28863728c2 | ||
|
|
dc341da813 | ||
|
|
1d210ad808 | ||
|
|
76d9adadf5 | ||
|
|
065371fa99 | ||
|
|
e45f21454e | ||
|
|
4d8c13b0bf | ||
|
|
5e6b38e8d1 | ||
|
|
5dd2b6313e | ||
|
|
96bf00c59b | ||
|
|
065310901f | ||
|
|
103261ea35 | ||
|
|
c6fb6f30af | ||
|
|
134d24e23b | ||
|
|
8a8e97f6e2 | ||
|
|
29c1bc47d4 | ||
|
|
8af5a10407 | ||
|
|
8e9991a860 | ||
|
|
8ba5d6d736 | ||
|
|
b3d1e2133d | ||
|
|
b3cf30f804 | ||
|
|
b845543e5f | ||
|
|
1b54470046 | ||
|
|
ee2b20926c | ||
|
|
96a76d2a19 | ||
|
|
78ac91fcd5 | ||
|
|
971b5b0e6d | ||
|
|
3393563498 | ||
|
|
94483ebfec | ||
|
|
db5d5878f5 | ||
|
|
2585daac9b | ||
|
|
21043108b3 | ||
|
|
65faa4507c | ||
|
|
644f2b9c7a | ||
|
|
dab9d9fb7e | ||
|
|
139757613b | ||
|
|
10eed2c1c7 | ||
|
|
c4fa75a1da | ||
|
|
847140560f | ||
|
|
1dc07c33ab | ||
|
|
4fc73d95c0 | ||
|
|
690ed55953 | ||
|
|
16a5809518 | ||
|
|
0fed34600b | ||
|
|
2d95f4177a | ||
|
|
e9d1db56fa | ||
|
|
a076a766a9 | ||
|
|
be3c78bf54 | ||
|
|
5bb6b8e2ab | ||
|
|
f79193681d | ||
|
|
379ae69db5 | ||
|
|
cde89f50a2 | ||
|
|
98563ba1ce | ||
|
|
99bf8cc39f | ||
|
|
ea642cf90e | ||
|
|
a7d89062cf | ||
|
|
564e4856b4 | ||
|
|
011cd08af3 | ||
|
|
e294a4726c | ||
|
|
a21e8b0849 | ||
|
|
cc6f36b50f | ||
|
|
6e35162788 | ||
|
|
30ca940eeb | ||
|
|
14eb19a42b | ||
|
|
da053ecde2 | ||
|
|
c86ef7de45 | ||
|
|
c5572020b9 | ||
|
|
3d7ed3a3f7 | ||
|
|
138163056f | ||
|
|
5e78e00bc9 | ||
|
|
2cb06edc2d | ||
|
|
8915bfb443 | ||
|
|
24017ad83f | ||
|
|
3edebe3747 | ||
|
|
636a2d5c87 | ||
|
|
4213aba76e | ||
|
|
3e822bad82 | ||
|
|
cd5b06b9c7 | ||
|
|
b0841562ea | ||
|
|
06f70e8246 | ||
|
|
9614f8761a | ||
|
|
92f9ab9001 | ||
|
|
ad554f89fc | ||
|
|
5bb37dff49 | ||
|
|
0d52dc2290 | ||
|
|
c575cb9cd5 | ||
|
|
9cdccd40c7 | ||
|
|
fdd10c5a98 | ||
|
|
8a617fdbc7 | ||
|
|
a058a74d8f | ||
|
|
4896a3265e | ||
|
|
131947275c | ||
|
|
1b7e8cec5e | ||
|
|
c17c0ea9aa | ||
|
|
7b378d2425 | ||
|
|
47da7d8278 | ||
|
|
3c69941fcd | ||
|
|
beb188facf | ||
|
|
dfea8f6535 | ||
|
|
3b89149bf0 | ||
|
|
c8d73caacd | ||
|
|
290185f16b | ||
|
|
05e9d36eed | ||
|
|
05815fcbf3 | ||
|
|
bce900a4ca | ||
|
|
bf7ba49013 | ||
|
|
323aa075b3 | ||
|
|
f526014dc8 | ||
|
|
dec546fa65 | ||
|
|
36390a7921 | ||
|
|
313d705778 | ||
|
|
ca34efa2d7 | ||
|
|
25e92cfe39 | ||
|
|
999359e81a | ||
|
|
3a74248746 | ||
|
|
cb828ecbd3 | ||
|
|
e1e984e02d | ||
|
|
d6e19fe350 | ||
|
|
1f91c748b5 | ||
|
|
38356acb4e | ||
|
|
7b2d598c38 | ||
|
|
c276eb0cfa | ||
|
|
571de591ca | ||
|
|
e49a197fd5 | ||
|
|
a30eabc23a | ||
|
|
73c4cddba5 | ||
|
|
6e341f770a | ||
|
|
527145ec81 | ||
|
|
c93edceffe | ||
|
|
6f9eac7c8e | ||
|
|
522420ef34 | ||
|
|
927bf052b0 | ||
|
|
1e44689b79 | ||
|
|
b967865faa | ||
|
|
054c0cafb2 | ||
|
|
29e37c8e2b | ||
|
|
44fc2afdc7 | ||
|
|
7776c8ee38 | ||
|
|
9ee7e1873f | ||
|
|
e21fcbd1bd | ||
|
|
cb407e75ab | ||
|
|
27d4612449 | ||
|
|
43ab5f79b6 | ||
|
|
5852ab513d | ||
|
|
3fe33e4e9e | ||
|
|
c44b90b5a4 | ||
|
|
f06dc6548c | ||
|
|
e13552c306 | ||
|
|
0305c3783f | ||
|
|
5158ac3d98 | ||
|
|
25c08b0885 | ||
|
|
f8131c97e9 | ||
|
|
3de1fab66a | ||
|
|
ab664128b7 | ||
|
|
91de693b80 | ||
|
|
a64606fb32 | ||
|
|
58d9103bd2 | ||
|
|
61ab5be12d | ||
|
|
030900b602 | ||
|
|
476d689c7d | ||
|
|
4aedbb69c2 | ||
|
|
db2a68709c | ||
|
|
f114a89136 | ||
|
|
96eda76391 | ||
|
|
e7d9a8fa2d | ||
|
|
1cca8db828 | ||
|
|
2cde665d2f | ||
|
|
d660c6342f | ||
|
|
7e8bb0e51f | ||
|
|
c87f4cc088 | ||
|
|
05c50349a8 | ||
|
|
e985952816 | ||
|
|
19f0ef9c86 | ||
|
|
cc8e13a85f | ||
|
|
6475a05794 | ||
|
|
cc9840afe5 | ||
|
|
b7a2cde458 | ||
|
|
453992b55d | ||
|
|
0b1067f95e | ||
|
|
21777cd95b | ||
|
|
827ad3bdf2 | ||
|
|
7818157cd0 | ||
|
|
d547241714 | ||
|
|
c41e0e9286 | ||
|
|
c2d4784895 | ||
|
|
11163965cf | ||
|
|
e9df065820 | ||
|
|
101ab0c11a | ||
|
|
25f081c0b7 | ||
|
|
700baef094 | ||
|
|
3faa586b16 | ||
|
|
8ca77fe8a4 | ||
|
|
019829cc4d | ||
|
|
a7f6bb223a | ||
|
|
eb77a8f328 | ||
|
|
5a484b2667 | ||
|
|
982c35f8e7 | ||
|
|
adffe5f47f | ||
|
|
f90a194b86 | ||
|
|
99e9356e5d | ||
|
|
860840a4c1 | ||
|
|
ab63b76ae0 | ||
|
|
29bca726b3 | ||
|
|
91297a68f8 | ||
|
|
2bea8ade63 | ||
|
|
ec486cf78c | ||
|
|
63ac378866 | ||
|
|
35db387fc2 | ||
|
|
a0f9baf5e7 | ||
|
|
4e54a79abc | ||
|
|
37bea7158f | ||
|
|
618fe4e959 | ||
|
|
0c73144977 | ||
|
|
ff8c3b1595 | ||
|
|
b756d0d0dc | ||
|
|
23147fafd1 | ||
|
|
b036b5f24b | ||
|
|
3b9014f750 | ||
|
|
6ad7a285e7 | ||
|
|
e529eaed2d | ||
|
|
4697c6c6ad | ||
|
|
56e47c3550 | ||
|
|
b3a9ba339c | ||
|
|
8d0ce37a59 | ||
|
|
a1bbbd6f7b | ||
|
|
de87743c6a | ||
|
|
9d4a72a4ba | ||
|
|
19e39aea49 | ||
|
|
da064a6005 | ||
|
|
a12a38a7a9 | ||
|
|
2c3a442a4c | ||
|
|
25d560cf46 | ||
|
|
c3324cf64c | ||
|
|
053bbe7028 | ||
|
|
74f980437f | ||
|
|
5ef96a29ac | ||
|
|
f261e7aa96 | ||
|
|
8e44e911ca | ||
|
|
6711ba06d9 | ||
|
|
fce69b6bb2 | ||
|
|
1183e2e4bf | ||
|
|
de3082e48f | ||
|
|
3acac34e4b | ||
|
|
f97bd2b357 | ||
|
|
3bac124921 | ||
|
|
ba44603d0f | ||
|
|
358f844c88 | ||
|
|
74bf2d742c | ||
|
|
acba3d5467 | ||
|
|
cfc066c8ea | ||
|
|
4f69f19866 | ||
|
|
c508f88af2 | ||
|
|
9757fdb42f | ||
|
|
24d57f535b | ||
|
|
e42dfc0726 | ||
|
|
3f54f23535 | ||
|
|
c7198b3538 | ||
|
|
827d10dd49 | ||
|
|
1b7a072f25 | ||
|
|
af1347ca17 | ||
|
|
f741cf5b23 | ||
|
|
eb1b3c8729 | ||
|
|
40e4678a45 | ||
|
|
d3c0a60de9 | ||
|
|
83bba80f3b | ||
|
|
44e0cfb878 | ||
|
|
a58e21e313 | ||
|
|
1131635006 | ||
|
|
c6e477e6ab | ||
|
|
18a81120bc | ||
|
|
17cd67f4d0 | ||
|
|
38a40d56a0 | ||
|
|
96fd2e26fd | ||
|
|
581bbc847d | ||
|
|
da7cbc41d2 | ||
|
|
282e22acb9 | ||
|
|
9374eebdf6 | ||
|
|
dcd5c5b39a | ||
|
|
974f8ee244 | ||
|
|
8212aa378a | ||
|
|
403d4c6408 | ||
|
|
142681fa27 | ||
|
|
69c9141817 | ||
|
|
9ed88e7608 | ||
|
|
b216f4d90b | ||
|
|
26ee07d8ba | ||
|
|
a8e5b02fb4 | ||
|
|
80a8912a53 | ||
|
|
1ba6797f25 | ||
|
|
11a2167dea | ||
|
|
af4eeb6e6b | ||
|
|
ea6459e2bd | ||
|
|
2dfa5a9660 | ||
|
|
b86434fbd3 | ||
|
|
223525cc69 | ||
|
|
fd63c079c8 | ||
|
|
ebe4511c57 | ||
|
|
e1a81ef8f3 | ||
|
|
3382c83d6e | ||
|
|
a89430673f | ||
|
|
fcea6dbdb6 | ||
|
|
c744a7d168 | ||
|
|
0256dc8640 | ||
|
|
41819794d7 | ||
|
|
836903cb02 | ||
|
|
7f822d33b5 | ||
|
|
232fdbb1ff | ||
|
|
f3f6111622 | ||
|
|
a8378e7e7f | ||
|
|
eb3165096f | ||
|
|
90ca58cda8 | ||
|
|
5a81526387 | ||
|
|
8df073b8ac | ||
|
|
0f7356b002 | ||
|
|
0c2166fb5f | ||
|
|
d228222fa6 | ||
|
|
e4b7d3244e | ||
|
|
7d0e841a73 | ||
|
|
9859e441e1 | ||
|
|
e1c638439f | ||
|
|
253aaaad97 | ||
|
|
a249ccc12b | ||
|
|
22fb898267 | ||
|
|
e038797875 | ||
|
|
7b9f9e23c0 | ||
|
|
01d062a68f | ||
|
|
a66dfb5faf | ||
|
|
ac1480680a | ||
|
|
13a9b5ca00 | ||
|
|
0cdf6abf0b | ||
|
|
2071694983 | ||
|
|
12e2b18a6f | ||
|
|
28e128756d | ||
|
|
a15109a12c | ||
|
|
e500fb57e8 | ||
|
|
f1849092eb | ||
|
|
f1dbd7e8a6 | ||
|
|
d417f454dd | ||
|
|
d79718d834 | ||
|
|
de9c3a1550 | ||
|
|
90fc7a4ed3 | ||
|
|
09edbc24bc | ||
|
|
92f8701c37 | ||
|
|
c828888770 | ||
|
|
bb7728e7e7 | ||
|
|
5f544f9c78 | ||
|
|
5b6a7d1995 | ||
|
|
b21185dde7 | ||
|
|
deaee0dc82 | ||
|
|
4206346496 | ||
|
|
6658b632b3 | ||
|
|
d9be7160ef | ||
|
|
d56424a287 | ||
|
|
2d397c5cb8 | ||
|
|
08004caa5d | ||
|
|
522358a004 | ||
|
|
e00a6c36e3 | ||
|
|
4664497cbc | ||
|
|
6be424bde5 | ||
|
|
0903438242 | ||
|
|
b874b68e57 | ||
|
|
6af9385c5f | ||
|
|
29398ac33b | ||
|
|
7525739b24 | ||
|
|
50ff71f3f3 | ||
|
|
70a9215c9d | ||
|
|
9c1a5d9a7d | ||
|
|
9a9b4a6892 | ||
|
|
e5502c724e | ||
|
|
125878e280 | ||
|
|
b4c1498ca1 | ||
|
|
88d534a7f2 | ||
|
|
6ce4ed0937 | ||
|
|
1b9ba62dc8 | ||
|
|
f3639e6200 | ||
|
|
1fe56cf401 | ||
|
|
a3add3d816 | ||
|
|
2807de2123 | ||
|
|
5029b956d2 | ||
|
|
815aaefad9 | ||
|
|
7ea740f647 | ||
|
|
eaf25e5b36 | ||
|
|
3b336a9127 | ||
|
|
cc4d1fd1c7 | ||
|
|
17ec6441a0 | ||
|
|
a1b107cecb | ||
|
|
2e06bc2352 | ||
|
|
af0a239bd9 | ||
|
|
92939ca3f2 | ||
|
|
8d15dba26d | ||
|
|
cdca5655fc | ||
|
|
7f72ee1296 | ||
|
|
5438fca35a |
20
.gitignore
vendored
20
.gitignore
vendored
@@ -1,11 +1,23 @@
|
||||
*.pyc
|
||||
*.swp
|
||||
*~
|
||||
prepare-vms/ips.txt
|
||||
prepare-vms/ips.html
|
||||
prepare-vms/ips.pdf
|
||||
prepare-vms/settings.yaml
|
||||
prepare-vms/tags
|
||||
prepare-vms/infra
|
||||
slides/*.yml.html
|
||||
slides/autopilot/state.yaml
|
||||
slides/index.html
|
||||
slides/past.html
|
||||
slides/slides.zip
|
||||
node_modules
|
||||
|
||||
### macOS ###
|
||||
# General
|
||||
.DS_Store
|
||||
.AppleDouble
|
||||
.LSOverride
|
||||
|
||||
### Windows ###
|
||||
# Windows thumbnail cache files
|
||||
Thumbs.db
|
||||
ehthumbs.db
|
||||
ehthumbs_vista.db
|
||||
|
||||
31
CHECKLIST.md
31
CHECKLIST.md
@@ -1,19 +1,24 @@
|
||||
This is the checklist that I (Jérôme) use when delivering a workshop.
|
||||
Checklist to use when delivering a workshop
|
||||
Authored by Jérôme; additions by Bridget
|
||||
|
||||
- [ ] Create branch + `_redirects` + push to GitHub + Netlify setup
|
||||
- [ ] Add branch to index.html
|
||||
- [ ] Update the slides that says which versions we are using
|
||||
- [ ] Update the version of Compose and Machine in settings
|
||||
- [ ] Create chatroom
|
||||
- [ ] Set chatroom in YML and deploy
|
||||
- [ ] Put chat room in index.html
|
||||
- [ ] Walk the room to count seats, check power supplies, lectern, A/V setup
|
||||
- [ ] How many VMs do we need?
|
||||
- [ ] Provision VMs
|
||||
- [ ] Create event-named branch (such as `conferenceYYYY`) in the [main repo](https://github.com/jpetazzo/container.training/)
|
||||
- [ ] Create file `slides/_redirects` containing a link to the desired tutorial: `/ /kube-halfday.yml.html 200`
|
||||
- [ ] Push local branch to GitHub and merge into main repo
|
||||
- [ ] [Netlify setup](https://app.netlify.com/sites/container-training/settings/domain): create subdomain for event-named branch
|
||||
- [ ] Add link to event-named branch to [container.training front page](https://github.com/jpetazzo/container.training/blob/master/slides/index.html)
|
||||
- [ ] Update the slides that says which versions we are using for [kube](https://github.com/jpetazzo/container.training/blob/master/slides/kube/versions-k8s.md) or [swarm](https://github.com/jpetazzo/container.training/blob/master/slides/swarm/versions.md) workshops
|
||||
- [ ] Update the version of Compose and Machine in [settings](https://github.com/jpetazzo/container.training/tree/master/prepare-vms/settings)
|
||||
- [ ] (optional) Create chatroom
|
||||
- [ ] (optional) Set chatroom in YML ([kube half-day example](https://github.com/jpetazzo/container.training/blob/master/slides/kube-halfday.yml#L6-L8)) and deploy
|
||||
- [ ] (optional) Put chat link on [container.training front page](https://github.com/jpetazzo/container.training/blob/master/slides/index.html)
|
||||
- [ ] How many VMs do we need? Check with event organizers ahead of time
|
||||
- [ ] Provision VMs (slightly more than we think we'll need)
|
||||
- [ ] Change password on presenter's VMs (to forestall any hijinx)
|
||||
- [ ] Onsite: walk the room to count seats, check power supplies, lectern, A/V setup
|
||||
- [ ] Print cards
|
||||
- [ ] Cut cards
|
||||
- [ ] Last minute merge from master
|
||||
- [ ] Last-minute merge from master
|
||||
- [ ] Check that all looks good
|
||||
- [ ] DELIVER!
|
||||
- [ ] Shutdown VMs
|
||||
- [ ] Shut down VMs
|
||||
- [ ] Update index.html to remove chat link and move session to past things
|
||||
|
||||
30
README.md
30
README.md
@@ -199,7 +199,7 @@ this section is for you!
|
||||
locked-down computer, host firewall, etc.
|
||||
- Horrible wifi, or ssh port TCP/22 not open on network! If wifi sucks you
|
||||
can try using MOSH https://mosh.org which handles SSH over UDP. TMUX can also
|
||||
prevent you from loosing your place if you get disconnected from servers.
|
||||
prevent you from losing your place if you get disconnected from servers.
|
||||
https://tmux.github.io
|
||||
- Forget to print "cards" and cut them up for handing out IP's.
|
||||
- Forget to have fun and focus on your students!
|
||||
@@ -292,15 +292,31 @@ If there is a bug and you can't even reproduce it:
|
||||
sorry. It is probably an Heisenbug. We can't act on it
|
||||
until it's reproducible, alas.
|
||||
|
||||
If you have attended this workshop and have feedback,
|
||||
or if you want somebody to deliver that workshop at your
|
||||
conference or for your company: you can contact one of us!
|
||||
|
||||
- jerome at docker dot com
|
||||
# “Please teach us!”
|
||||
|
||||
If you have attended one of these workshops, and want
|
||||
your team or organization to attend a similar one, you
|
||||
can look at the list of upcoming events on
|
||||
http://container.training/.
|
||||
|
||||
You are also welcome to reuse these materials to run
|
||||
your own workshop, for your team or even at a meetup
|
||||
or conference. In that case, you might enjoy watching
|
||||
[Bridget Kromhout's talk at KubeCon 2018 Europe](
|
||||
https://www.youtube.com/watch?v=mYsp_cGY2O0), explaining
|
||||
precisely how to run such a workshop yourself.
|
||||
|
||||
Finally, you can also contact the following persons,
|
||||
who are experienced speakers, are familiar with the
|
||||
material, and are available to deliver these workshops
|
||||
at your conference or for your company:
|
||||
|
||||
- jerome dot petazzoni at gmail dot com
|
||||
- bret at bretfisher dot com
|
||||
|
||||
If you are willing and able to deliver such workshops,
|
||||
feel free to submit a PR to add your name to that list!
|
||||
(If you are willing and able to deliver such workshops,
|
||||
feel free to submit a PR to add your name to that list!)
|
||||
|
||||
**Thank you!**
|
||||
|
||||
|
||||
@@ -5,6 +5,3 @@ RUN gem install thin
|
||||
ADD hasher.rb /
|
||||
CMD ["ruby", "hasher.rb"]
|
||||
EXPOSE 80
|
||||
HEALTHCHECK \
|
||||
--interval=1s --timeout=2s --retries=3 --start-period=1s \
|
||||
CMD curl http://localhost/ || exit 1
|
||||
|
||||
@@ -28,5 +28,5 @@ def rng(how_many_bytes):
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
app.run(host="0.0.0.0", port=80)
|
||||
app.run(host="0.0.0.0", port=80, threaded=False)
|
||||
|
||||
|
||||
@@ -2,14 +2,14 @@ version: "2"
|
||||
|
||||
services:
|
||||
elasticsearch:
|
||||
image: elasticsearch
|
||||
image: elasticsearch:2
|
||||
# If you need to access ES directly, just uncomment those lines.
|
||||
#ports:
|
||||
# - "9200:9200"
|
||||
# - "9300:9300"
|
||||
|
||||
logstash:
|
||||
image: logstash
|
||||
image: logstash:2
|
||||
command: |
|
||||
-e '
|
||||
input {
|
||||
@@ -47,7 +47,7 @@ services:
|
||||
- "12201:12201/udp"
|
||||
|
||||
kibana:
|
||||
image: kibana
|
||||
image: kibana:4
|
||||
ports:
|
||||
- "5601:5601"
|
||||
environment:
|
||||
|
||||
90
k8s/consul.yaml
Normal file
90
k8s/consul.yaml
Normal file
@@ -0,0 +1,90 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: consul
|
||||
labels:
|
||||
app: consul
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: consul
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: consul
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: consul
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: consul
|
||||
labels:
|
||||
app: consul
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: consul
|
||||
spec:
|
||||
ports:
|
||||
- port: 8500
|
||||
name: http
|
||||
selector:
|
||||
app: consul
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: consul
|
||||
spec:
|
||||
serviceName: consul
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: consul
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: consul
|
||||
spec:
|
||||
serviceAccountName: consul
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: app
|
||||
operator: In
|
||||
values:
|
||||
- consul
|
||||
topologyKey: kubernetes.io/hostname
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: consul
|
||||
image: "consul:1.4.0"
|
||||
args:
|
||||
- "agent"
|
||||
- "-bootstrap-expect=3"
|
||||
- "-retry-join=provider=k8s label_selector=\"app=consul\""
|
||||
- "-client=0.0.0.0"
|
||||
- "-data-dir=/consul/data"
|
||||
- "-server"
|
||||
- "-ui"
|
||||
lifecycle:
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- consul leave
|
||||
28
k8s/docker-build.yaml
Normal file
28
k8s/docker-build.yaml
Normal file
@@ -0,0 +1,28 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: build-image
|
||||
spec:
|
||||
restartPolicy: OnFailure
|
||||
containers:
|
||||
- name: docker-build
|
||||
image: docker
|
||||
env:
|
||||
- name: REGISTRY_PORT
|
||||
value: #"30000"
|
||||
command: ["sh", "-c"]
|
||||
args:
|
||||
- |
|
||||
apk add --no-cache git &&
|
||||
mkdir /workspace &&
|
||||
git clone https://github.com/jpetazzo/container.training /workspace &&
|
||||
docker build -t localhost:$REGISTRY_PORT/worker /workspace/dockercoins/worker &&
|
||||
docker push localhost:$REGISTRY_PORT/worker
|
||||
volumeMounts:
|
||||
- name: docker-socket
|
||||
mountPath: /var/run/docker.sock
|
||||
volumes:
|
||||
- name: docker-socket
|
||||
hostPath:
|
||||
path: /var/run/docker.sock
|
||||
|
||||
227
k8s/efk.yaml
Normal file
227
k8s/efk.yaml
Normal file
@@ -0,0 +1,227 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: fluentd
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: fluentd
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: fluentd
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: fluentd
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: fluentd
|
||||
namespace: default
|
||||
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: fluentd
|
||||
labels:
|
||||
k8s-app: fluentd-logging
|
||||
version: v1
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: fluentd-logging
|
||||
version: v1
|
||||
kubernetes.io/cluster-service: "true"
|
||||
spec:
|
||||
serviceAccount: fluentd
|
||||
serviceAccountName: fluentd
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
containers:
|
||||
- name: fluentd
|
||||
image: fluent/fluentd-kubernetes-daemonset:elasticsearch
|
||||
env:
|
||||
- name: FLUENT_ELASTICSEARCH_HOST
|
||||
value: "elasticsearch"
|
||||
- name: FLUENT_ELASTICSEARCH_PORT
|
||||
value: "9200"
|
||||
- name: FLUENT_ELASTICSEARCH_SCHEME
|
||||
value: "http"
|
||||
# X-Pack Authentication
|
||||
# =====================
|
||||
- name: FLUENT_ELASTICSEARCH_USER
|
||||
value: "elastic"
|
||||
- name: FLUENT_ELASTICSEARCH_PASSWORD
|
||||
value: "changeme"
|
||||
- name: FLUENT_UID
|
||||
value: "0"
|
||||
resources:
|
||||
limits:
|
||||
memory: 200Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 200Mi
|
||||
volumeMounts:
|
||||
- name: varlog
|
||||
mountPath: /var/log
|
||||
- name: varlibdockercontainers
|
||||
mountPath: /var/lib/docker/containers
|
||||
readOnly: true
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- name: varlog
|
||||
hostPath:
|
||||
path: /var/log
|
||||
- name: varlibdockercontainers
|
||||
hostPath:
|
||||
path: /var/lib/docker/containers
|
||||
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
annotations:
|
||||
deployment.kubernetes.io/revision: "1"
|
||||
creationTimestamp: null
|
||||
generation: 1
|
||||
labels:
|
||||
run: elasticsearch
|
||||
name: elasticsearch
|
||||
selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/elasticsearch
|
||||
spec:
|
||||
progressDeadlineSeconds: 600
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
run: elasticsearch
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
run: elasticsearch
|
||||
spec:
|
||||
containers:
|
||||
- image: elasticsearch:5.6.8
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: elasticsearch
|
||||
resources: {}
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
env:
|
||||
- name: ES_JAVA_OPTS
|
||||
value: "-Xms1g -Xmx1g"
|
||||
dnsPolicy: ClusterFirst
|
||||
restartPolicy: Always
|
||||
schedulerName: default-scheduler
|
||||
securityContext: {}
|
||||
terminationGracePeriodSeconds: 30
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
run: elasticsearch
|
||||
name: elasticsearch
|
||||
selfLink: /api/v1/namespaces/default/services/elasticsearch
|
||||
spec:
|
||||
ports:
|
||||
- port: 9200
|
||||
protocol: TCP
|
||||
targetPort: 9200
|
||||
selector:
|
||||
run: elasticsearch
|
||||
sessionAffinity: None
|
||||
type: ClusterIP
|
||||
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
annotations:
|
||||
deployment.kubernetes.io/revision: "1"
|
||||
creationTimestamp: null
|
||||
generation: 1
|
||||
labels:
|
||||
run: kibana
|
||||
name: kibana
|
||||
selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/kibana
|
||||
spec:
|
||||
progressDeadlineSeconds: 600
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
run: kibana
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
run: kibana
|
||||
spec:
|
||||
containers:
|
||||
- env:
|
||||
- name: ELASTICSEARCH_URL
|
||||
value: http://elasticsearch:9200/
|
||||
image: kibana:5.6.8
|
||||
imagePullPolicy: Always
|
||||
name: kibana
|
||||
resources: {}
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
dnsPolicy: ClusterFirst
|
||||
restartPolicy: Always
|
||||
schedulerName: default-scheduler
|
||||
securityContext: {}
|
||||
terminationGracePeriodSeconds: 30
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
run: kibana
|
||||
name: kibana
|
||||
selfLink: /api/v1/namespaces/default/services/kibana
|
||||
spec:
|
||||
externalTrafficPolicy: Cluster
|
||||
ports:
|
||||
- port: 5601
|
||||
protocol: TCP
|
||||
targetPort: 5601
|
||||
selector:
|
||||
run: kibana
|
||||
sessionAffinity: None
|
||||
type: NodePort
|
||||
14
k8s/grant-admin-to-dashboard.yaml
Normal file
14
k8s/grant-admin-to-dashboard.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
18
k8s/haproxy.cfg
Normal file
18
k8s/haproxy.cfg
Normal file
@@ -0,0 +1,18 @@
|
||||
global
|
||||
daemon
|
||||
maxconn 256
|
||||
|
||||
defaults
|
||||
mode tcp
|
||||
timeout connect 5000ms
|
||||
timeout client 50000ms
|
||||
timeout server 50000ms
|
||||
|
||||
frontend the-frontend
|
||||
bind *:80
|
||||
default_backend the-backend
|
||||
|
||||
backend the-backend
|
||||
server google.com-80 google.com:80 maxconn 32 check
|
||||
server ibm.fr-80 ibm.fr:80 maxconn 32 check
|
||||
|
||||
16
k8s/haproxy.yaml
Normal file
16
k8s/haproxy.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: haproxy
|
||||
spec:
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: haproxy
|
||||
containers:
|
||||
- name: haproxy
|
||||
image: haproxy
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /usr/local/etc/haproxy/
|
||||
|
||||
14
k8s/ingress.yaml
Normal file
14
k8s/ingress.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: cheddar
|
||||
spec:
|
||||
rules:
|
||||
- host: cheddar.A.B.C.D.nip.io
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
backend:
|
||||
serviceName: cheddar
|
||||
servicePort: 80
|
||||
|
||||
10
k8s/just-a-pod.yaml
Normal file
10
k8s/just-a-pod.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
apiVersion: v1
|
||||
Kind: Pod
|
||||
metadata:
|
||||
name: hello
|
||||
namespace: default
|
||||
spec:
|
||||
containers:
|
||||
- name: hello
|
||||
image: nginx
|
||||
|
||||
29
k8s/kaniko-build.yaml
Normal file
29
k8s/kaniko-build.yaml
Normal file
@@ -0,0 +1,29 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: kaniko-build
|
||||
spec:
|
||||
initContainers:
|
||||
- name: git-clone
|
||||
image: alpine
|
||||
command: ["sh", "-c"]
|
||||
args:
|
||||
- |
|
||||
apk add --no-cache git &&
|
||||
git clone git://github.com/jpetazzo/container.training /workspace
|
||||
volumeMounts:
|
||||
- name: workspace
|
||||
mountPath: /workspace
|
||||
containers:
|
||||
- name: build-image
|
||||
image: gcr.io/kaniko-project/executor:latest
|
||||
args:
|
||||
- "--context=/workspace/dockercoins/rng"
|
||||
- "--insecure"
|
||||
- "--destination=registry:5000/rng-kaniko:latest"
|
||||
volumeMounts:
|
||||
- name: workspace
|
||||
mountPath: /workspace
|
||||
volumes:
|
||||
- name: workspace
|
||||
|
||||
167
k8s/kubernetes-dashboard.yaml
Normal file
167
k8s/kubernetes-dashboard.yaml
Normal file
@@ -0,0 +1,167 @@
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Configuration to deploy release version of the Dashboard UI compatible with
|
||||
# Kubernetes 1.8.
|
||||
#
|
||||
# Example usage: kubectl create -f <this_file>
|
||||
|
||||
# ------------------- Dashboard Secret ------------------- #
|
||||
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard-certs
|
||||
namespace: kube-system
|
||||
type: Opaque
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Service Account ------------------- #
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Role & Role Binding ------------------- #
|
||||
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: kubernetes-dashboard-minimal
|
||||
namespace: kube-system
|
||||
rules:
|
||||
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["create"]
|
||||
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["create"]
|
||||
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
|
||||
verbs: ["get", "update", "delete"]
|
||||
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
resourceNames: ["kubernetes-dashboard-settings"]
|
||||
verbs: ["get", "update"]
|
||||
# Allow Dashboard to get metrics from heapster.
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
resourceNames: ["heapster"]
|
||||
verbs: ["proxy"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services/proxy"]
|
||||
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: kubernetes-dashboard-minimal
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: kubernetes-dashboard-minimal
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Deployment ------------------- #
|
||||
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1beta2
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
spec:
|
||||
replicas: 1
|
||||
revisionHistoryLimit: 10
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
spec:
|
||||
containers:
|
||||
- name: kubernetes-dashboard
|
||||
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
args:
|
||||
- --auto-generate-certificates
|
||||
# Uncomment the following line to manually specify Kubernetes API server Host
|
||||
# If not specified, Dashboard will attempt to auto discover the API server and connect
|
||||
# to it. Uncomment only if the default does not work.
|
||||
# - --apiserver-host=http://my-address:port
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
# Create on-disk volume to store exec logs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
volumes:
|
||||
- name: kubernetes-dashboard-certs
|
||||
secret:
|
||||
secretName: kubernetes-dashboard-certs
|
||||
- name: tmp-volume
|
||||
emptyDir: {}
|
||||
serviceAccountName: kubernetes-dashboard
|
||||
# Comment the following tolerations if Dashboard must not be deployed on master
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
|
||||
---
|
||||
# ------------------- Dashboard Service ------------------- #
|
||||
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
name: kubernetes-dashboard
|
||||
namespace: kube-system
|
||||
spec:
|
||||
ports:
|
||||
- port: 443
|
||||
targetPort: 8443
|
||||
selector:
|
||||
k8s-app: kubernetes-dashboard
|
||||
14
k8s/netpol-allow-testcurl-for-testweb.yaml
Normal file
14
k8s/netpol-allow-testcurl-for-testweb.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
kind: NetworkPolicy
|
||||
apiVersion: networking.k8s.io/v1
|
||||
metadata:
|
||||
name: allow-testcurl-for-testweb
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app: testweb
|
||||
ingress:
|
||||
- from:
|
||||
- podSelector:
|
||||
matchLabels:
|
||||
run: testcurl
|
||||
|
||||
10
k8s/netpol-deny-all-for-testweb.yaml
Normal file
10
k8s/netpol-deny-all-for-testweb.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
kind: NetworkPolicy
|
||||
apiVersion: networking.k8s.io/v1
|
||||
metadata:
|
||||
name: deny-all-for-testweb
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app: testweb
|
||||
ingress: []
|
||||
|
||||
22
k8s/netpol-dockercoins.yaml
Normal file
22
k8s/netpol-dockercoins.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
kind: NetworkPolicy
|
||||
apiVersion: networking.k8s.io/v1
|
||||
metadata:
|
||||
name: deny-from-other-namespaces
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
ingress:
|
||||
- from:
|
||||
- podSelector: {}
|
||||
---
|
||||
kind: NetworkPolicy
|
||||
apiVersion: networking.k8s.io/v1
|
||||
metadata:
|
||||
name: allow-webui
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app: webui
|
||||
ingress:
|
||||
- from: []
|
||||
|
||||
21
k8s/nginx-with-volume.yaml
Normal file
21
k8s/nginx-with-volume.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx-with-volume
|
||||
spec:
|
||||
volumes:
|
||||
- name: www
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html/
|
||||
- name: git
|
||||
image: alpine
|
||||
command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /www/
|
||||
restartPolicy: OnFailure
|
||||
|
||||
580
k8s/portworx.yaml
Normal file
580
k8s/portworx.yaml
Normal file
@@ -0,0 +1,580 @@
|
||||
# SOURCE: https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop4&c=px-workshop&stork=true&lh=true
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: stork-config
|
||||
namespace: kube-system
|
||||
data:
|
||||
policy.cfg: |-
|
||||
{
|
||||
"kind": "Policy",
|
||||
"apiVersion": "v1",
|
||||
"extenders": [
|
||||
{
|
||||
"urlPrefix": "http://stork-service.kube-system.svc:8099",
|
||||
"apiVersion": "v1beta1",
|
||||
"filterVerb": "filter",
|
||||
"prioritizeVerb": "prioritize",
|
||||
"weight": 5,
|
||||
"enableHttps": false,
|
||||
"nodeCacheCapable": false
|
||||
}
|
||||
]
|
||||
}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: stork-account
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: stork-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "list", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "create", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["list", "watch", "create", "update", "patch"]
|
||||
- apiGroups: ["apiextensions.k8s.io"]
|
||||
resources: ["customresourcedefinitions"]
|
||||
verbs: ["create", "list", "watch", "delete"]
|
||||
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
|
||||
resources: ["volumesnapshots"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
||||
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
|
||||
resources: ["volumesnapshotdatas"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["get", "create", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["*"]
|
||||
resources: ["deployments", "deployments/extensions"]
|
||||
verbs: ["list", "get", "watch", "patch", "update", "initialize"]
|
||||
- apiGroups: ["*"]
|
||||
resources: ["statefulsets", "statefulsets/extensions"]
|
||||
verbs: ["list", "get", "watch", "patch", "update", "initialize"]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: stork-role-binding
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: stork-account
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: stork-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: stork-service
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
name: stork
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 8099
|
||||
targetPort: 8099
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ""
|
||||
labels:
|
||||
tier: control-plane
|
||||
name: stork
|
||||
namespace: kube-system
|
||||
spec:
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
replicas: 3
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ""
|
||||
labels:
|
||||
name: stork
|
||||
tier: control-plane
|
||||
spec:
|
||||
containers:
|
||||
- command:
|
||||
- /stork
|
||||
- --driver=pxd
|
||||
- --verbose
|
||||
- --leader-elect=true
|
||||
- --health-monitor-interval=120
|
||||
imagePullPolicy: Always
|
||||
image: openstorage/stork:1.1.3
|
||||
resources:
|
||||
requests:
|
||||
cpu: '0.1'
|
||||
name: stork
|
||||
hostPID: false
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: "name"
|
||||
operator: In
|
||||
values:
|
||||
- stork
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
serviceAccountName: stork-account
|
||||
---
|
||||
kind: StorageClass
|
||||
apiVersion: storage.k8s.io/v1
|
||||
metadata:
|
||||
name: stork-snapshot-sc
|
||||
provisioner: stork-snapshot
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: stork-scheduler-account
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: stork-scheduler-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["endpoints"]
|
||||
verbs: ["get", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["create", "patch", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["endpoints"]
|
||||
verbs: ["create"]
|
||||
- apiGroups: [""]
|
||||
resourceNames: ["kube-scheduler"]
|
||||
resources: ["endpoints"]
|
||||
verbs: ["delete", "get", "patch", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["delete", "get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["bindings", "pods/binding"]
|
||||
verbs: ["create"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods/status"]
|
||||
verbs: ["patch", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["replicationcontrollers", "services"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["apps", "extensions"]  # ReplicaSets live in the "apps" API group ("app" is a typo)
|
||||
resources: ["replicasets"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["statefulsets"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["policy"]
|
||||
resources: ["poddisruptionbudgets"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims", "persistentvolumes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: stork-scheduler-role-binding
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: stork-scheduler-account
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: stork-scheduler-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
component: scheduler
|
||||
tier: control-plane
|
||||
name: stork-scheduler
|
||||
name: stork-scheduler
|
||||
namespace: kube-system
|
||||
spec:
|
||||
replicas: 3
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
component: scheduler
|
||||
tier: control-plane
|
||||
name: stork-scheduler
|
||||
spec:
|
||||
containers:
|
||||
- command:
|
||||
- /usr/local/bin/kube-scheduler
|
||||
- --address=0.0.0.0
|
||||
- --leader-elect=true
|
||||
- --scheduler-name=stork
|
||||
- --policy-configmap=stork-config
|
||||
- --policy-configmap-namespace=kube-system
|
||||
- --lock-object-name=stork-scheduler
|
||||
image: gcr.io/google_containers/kube-scheduler-amd64:v1.11.2
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10251
|
||||
initialDelaySeconds: 15
|
||||
name: stork-scheduler
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10251
|
||||
resources:
|
||||
requests:
|
||||
cpu: '0.1'
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: "name"
|
||||
operator: In
|
||||
values:
|
||||
- stork-scheduler
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
hostPID: false
|
||||
serviceAccountName: stork-scheduler-account
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: portworx-service
|
||||
namespace: kube-system
|
||||
labels:
|
||||
name: portworx
|
||||
spec:
|
||||
selector:
|
||||
name: portworx
|
||||
ports:
|
||||
- name: px-api
|
||||
protocol: TCP
|
||||
port: 9001
|
||||
targetPort: 9001
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: px-account
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: node-get-put-list-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["watch", "get", "update", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["delete", "get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims", "persistentvolumes"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["get", "list", "update", "create"]
|
||||
- apiGroups: ["extensions"]
|
||||
resources: ["podsecuritypolicies"]
|
||||
resourceNames: ["privileged"]
|
||||
verbs: ["use"]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: node-role-binding
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: px-account
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: node-get-put-list-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: portworx
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: px-role
|
||||
namespace: portworx
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list", "create", "update", "patch"]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: px-role-binding
|
||||
namespace: portworx
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: px-account
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: px-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: portworx
|
||||
namespace: kube-system
|
||||
annotations:
|
||||
portworx.com/install-source: "https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop4&c=px-workshop&stork=true&lh=true"
|
||||
spec:
|
||||
minReadySeconds: 0
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: portworx
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: px/enabled
|
||||
operator: NotIn
|
||||
values:
|
||||
- "false"
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: DoesNotExist
|
||||
hostNetwork: true
|
||||
hostPID: false
|
||||
containers:
|
||||
- name: portworx
|
||||
image: portworx/oci-monitor:1.4.2.2
|
||||
imagePullPolicy: Always
|
||||
args:
|
||||
["-c", "px-workshop", "-s", "/dev/loop4", "-b",
|
||||
"-x", "kubernetes"]
|
||||
env:
|
||||
- name: "PX_TEMPLATE_VERSION"
|
||||
value: "v4"
|
||||
|
||||
livenessProbe:
|
||||
periodSeconds: 30
|
||||
initialDelaySeconds: 840 # allow image pull in slow networks
|
||||
httpGet:
|
||||
host: 127.0.0.1
|
||||
path: /status
|
||||
port: 9001
|
||||
readinessProbe:
|
||||
periodSeconds: 10
|
||||
httpGet:
|
||||
host: 127.0.0.1
|
||||
path: /health
|
||||
port: 9015
|
||||
terminationMessagePath: "/tmp/px-termination-log"
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: dockersock
|
||||
mountPath: /var/run/docker.sock
|
||||
- name: etcpwx
|
||||
mountPath: /etc/pwx
|
||||
- name: optpwx
|
||||
mountPath: /opt/pwx
|
||||
- name: proc1nsmount
|
||||
mountPath: /host_proc/1/ns
|
||||
- name: sysdmount
|
||||
mountPath: /etc/systemd/system
|
||||
- name: diagsdump
|
||||
mountPath: /var/cores
|
||||
- name: journalmount1
|
||||
mountPath: /var/run/log
|
||||
readOnly: true
|
||||
- name: journalmount2
|
||||
mountPath: /var/log
|
||||
readOnly: true
|
||||
- name: dbusmount
|
||||
mountPath: /var/run/dbus
|
||||
restartPolicy: Always
|
||||
serviceAccountName: px-account
|
||||
volumes:
|
||||
- name: dockersock
|
||||
hostPath:
|
||||
path: /var/run/docker.sock
|
||||
- name: etcpwx
|
||||
hostPath:
|
||||
path: /etc/pwx
|
||||
- name: optpwx
|
||||
hostPath:
|
||||
path: /opt/pwx
|
||||
- name: proc1nsmount
|
||||
hostPath:
|
||||
path: /proc/1/ns
|
||||
- name: sysdmount
|
||||
hostPath:
|
||||
path: /etc/systemd/system
|
||||
- name: diagsdump
|
||||
hostPath:
|
||||
path: /var/cores
|
||||
- name: journalmount1
|
||||
hostPath:
|
||||
path: /var/run/log
|
||||
- name: journalmount2
|
||||
hostPath:
|
||||
path: /var/log
|
||||
- name: dbusmount
|
||||
hostPath:
|
||||
path: /var/run/dbus
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: px-lh-account
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: px-lh-role
|
||||
namespace: kube-system
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["get", "create", "update"]
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: px-lh-role-binding
|
||||
namespace: kube-system
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: px-lh-account
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: px-lh-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: px-lighthouse
|
||||
namespace: kube-system
|
||||
labels:
|
||||
tier: px-web-console
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- name: http
|
||||
port: 80
|
||||
nodePort: 32678
|
||||
- name: https
|
||||
port: 443
|
||||
nodePort: 32679
|
||||
selector:
|
||||
tier: px-web-console
|
||||
---
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: px-lighthouse
|
||||
namespace: kube-system
|
||||
labels:
|
||||
tier: px-web-console
|
||||
spec:
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
selector:
|
||||
matchLabels:
|
||||
tier: px-web-console
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
tier: px-web-console
|
||||
spec:
|
||||
initContainers:
|
||||
- name: config-init
|
||||
image: portworx/lh-config-sync:0.2
|
||||
imagePullPolicy: Always
|
||||
args:
|
||||
- "init"
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /config/lh
|
||||
containers:
|
||||
- name: px-lighthouse
|
||||
image: portworx/px-lighthouse:1.5.0
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 80
|
||||
- containerPort: 443
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /config/lh
|
||||
- name: config-sync
|
||||
image: portworx/lh-config-sync:0.2
|
||||
imagePullPolicy: Always
|
||||
args:
|
||||
- "sync"
|
||||
volumeMounts:
|
||||
- name: config
|
||||
mountPath: /config/lh
|
||||
serviceAccountName: px-lh-account
|
||||
volumes:
|
||||
- name: config
|
||||
emptyDir: {}
|
||||
30
k8s/postgres.yaml
Normal file
30
k8s/postgres.yaml
Normal file
@@ -0,0 +1,30 @@
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: postgres
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: postgres
|
||||
serviceName: postgres
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: postgres
|
||||
spec:
|
||||
schedulerName: stork
|
||||
containers:
|
||||
- name: postgres
|
||||
image: postgres:10.5
|
||||
volumeMounts:
|
||||
- mountPath: /var/lib/postgresql/data
|
||||
name: postgres
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: postgres
|
||||
spec:
|
||||
accessModes: ["ReadWriteOnce"]
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
||||
|
||||
15
k8s/registry.yaml
Normal file
15
k8s/registry.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: registry
|
||||
spec:
|
||||
containers:
|
||||
- name: registry
|
||||
image: registry
|
||||
env:
|
||||
- name: REGISTRY_HTTP_ADDR
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: registry
|
||||
key: http.addr
|
||||
|
||||
67
k8s/socat.yaml
Normal file
67
k8s/socat.yaml
Normal file
@@ -0,0 +1,67 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
annotations:
|
||||
deployment.kubernetes.io/revision: "2"
|
||||
creationTimestamp: null
|
||||
generation: 1
|
||||
labels:
|
||||
app: socat
|
||||
name: socat
|
||||
namespace: kube-system
|
||||
selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: socat
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
app: socat
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- sh
|
||||
- -c
|
||||
- apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard:443,verify=0
|
||||
image: alpine
|
||||
imagePullPolicy: Always
|
||||
name: socat
|
||||
resources: {}
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
dnsPolicy: ClusterFirst
|
||||
restartPolicy: Always
|
||||
schedulerName: default-scheduler
|
||||
securityContext: {}
|
||||
terminationGracePeriodSeconds: 30
|
||||
status: {}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
app: socat
|
||||
name: socat
|
||||
namespace: kube-system
|
||||
selfLink: /api/v1/namespaces/kube-system/services/socat
|
||||
spec:
|
||||
externalTrafficPolicy: Cluster
|
||||
ports:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
targetPort: 80
|
||||
selector:
|
||||
app: socat
|
||||
sessionAffinity: None
|
||||
type: NodePort
|
||||
status:
|
||||
loadBalancer: {}
|
||||
11
k8s/storage-class.yaml
Normal file
11
k8s/storage-class.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
kind: StorageClass
|
||||
apiVersion: storage.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: portworx-replicated
|
||||
annotations:
|
||||
storageclass.kubernetes.io/is-default-class: "true"
|
||||
provisioner: kubernetes.io/portworx-volume
|
||||
parameters:
|
||||
repl: "2"
|
||||
priority_io: "high"
|
||||
|
||||
100
k8s/traefik.yaml
Normal file
100
k8s/traefik.yaml
Normal file
@@ -0,0 +1,100 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: DaemonSet
|
||||
apiVersion: extensions/v1beta1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: traefik-ingress-lb
|
||||
name: traefik-ingress-lb
|
||||
spec:
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
hostNetwork: true
|
||||
serviceAccountName: traefik-ingress-controller
|
||||
terminationGracePeriodSeconds: 60
|
||||
containers:
|
||||
- image: traefik
|
||||
name: traefik-ingress-lb
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
hostPort: 80
|
||||
- name: admin
|
||||
containerPort: 8080
|
||||
hostPort: 8080
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
args:
|
||||
- --api
|
||||
- --kubernetes
|
||||
- --logLevel=INFO
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: traefik-ingress-service
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: traefik-ingress-lb
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 80
|
||||
name: web
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
name: admin
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
- endpoints
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: traefik-ingress-controller
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: traefik-ingress-controller
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: traefik-ingress-controller
|
||||
namespace: kube-system
|
||||
@@ -32,7 +32,7 @@ Virtualbox, Vagrant and Ansible
|
||||
|
||||
$ source path/to/your-ansible-clone/hacking/env-setup
|
||||
|
||||
- you need to repeat the last step everytime you open a new terminal session
|
||||
- you need to repeat the last step every time you open a new terminal session
|
||||
and want to use any Ansible command (but you'll probably only need to run
|
||||
it once).
|
||||
|
||||
|
||||
@@ -1,4 +1,10 @@
|
||||
# Trainer tools to create and prepare VMs for Docker workshops on AWS
|
||||
# Trainer tools to create and prepare VMs for Docker workshops
|
||||
|
||||
These tools can help you to create VMs on:
|
||||
|
||||
- Azure
|
||||
- EC2
|
||||
- OpenStack
|
||||
|
||||
## Prerequisites
|
||||
|
||||
@@ -6,6 +12,9 @@
|
||||
- [Docker Compose](https://docs.docker.com/compose/install/)
|
||||
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`) - the configuration scripts require this
|
||||
|
||||
Depending on the infrastructure that you want to use, you also need to install
|
||||
the Azure CLI, the AWS CLI, or terraform (for OpenStack deployment).
|
||||
|
||||
And if you want to generate printable cards:
|
||||
|
||||
- [pyyaml](https://pypi.python.org/pypi/PyYAML) (on a Mac: `brew install pyyaml`)
|
||||
@@ -14,19 +23,25 @@ And if you want to generate printable cards:
|
||||
## General Workflow
|
||||
|
||||
- fork/clone repo
|
||||
- set required environment variables for AWS
|
||||
- create an infrastructure configuration in the `prepare-vms/infra` directory
|
||||
(using one of the example files in that directory)
|
||||
- create your own setting file from `settings/example.yaml`
|
||||
- run `./workshopctl` commands to create instances, install Docker, set up each user's environment on node1, and perform other management tasks
|
||||
- run the `./workshopctl cards` command to generate a PDF for printing handouts of each user's host IPs and login info
|
||||
- if necessary, increase allowed open files: `ulimit -Sn 10000`
|
||||
- run `./workshopctl start` to create instances
|
||||
- run `./workshopctl deploy` to install Docker and setup environment
|
||||
- run `./workshopctl kube` (if you want to install and setup Kubernetes)
|
||||
- run `./workshopctl cards` (if you want to generate a PDF for printing handouts of each user's host IPs and login info)
|
||||
- run `./workshopctl stop` at the end of the workshop to terminate instances
|
||||
|
||||
## Clone/Fork the Repo, and Build the Tools Image
|
||||
|
||||
The Docker Compose file here is used to build an image with all the dependencies to run the `./workshopctl` commands and optional tools. Each run of the script will check if you have those dependencies locally on your host, and will only use the container if you're [missing a dependency](workshopctl#L5).
|
||||
|
||||
$ git clone https://github.com/jpetazzo/orchestration-workshop.git
|
||||
$ cd orchestration-workshop/prepare-vms
|
||||
$ git clone https://github.com/jpetazzo/container.training
|
||||
$ cd container.training/prepare-vms
|
||||
$ docker-compose build
|
||||
|
||||
|
||||
## Preparing to Run `./workshopctl`
|
||||
|
||||
### Required AWS Permissions/Info
|
||||
@@ -35,27 +50,37 @@ The Docker Compose file here is used to build a image with all the dependencies
|
||||
- Using a non-default VPC or Security Group isn't supported out of box yet, so you will have to customize `lib/commands.sh` if you want to change that.
|
||||
- These instances will assign the default VPC Security Group, which does not open any ports from Internet by default. So you'll need to add Inbound rules for `SSH | TCP | 22 | 0.0.0.0/0` and `Custom TCP Rule | TCP | 8000 - 8002 | 0.0.0.0/0`, or run `./workshopctl opensg` which opens up all ports.
|
||||
|
||||
### Required Environment Variables
|
||||
### Create your `infra` file
|
||||
|
||||
- `AWS_ACCESS_KEY_ID`
|
||||
- `AWS_SECRET_ACCESS_KEY`
|
||||
- `AWS_DEFAULT_REGION`
|
||||
You need to do this only once. (On AWS, you can create one `infra`
|
||||
file per region.)
|
||||
|
||||
If you're not using AWS, set these to placeholder values:
|
||||
Make a copy of one of the example files in the `infra` directory.
|
||||
|
||||
For instance:
|
||||
|
||||
```bash
|
||||
cp infra/example.aws infra/aws-us-west-2
|
||||
```
|
||||
export AWS_ACCESS_KEY_ID="foo"
|
||||
export AWS_SECRET_ACCESS_KEY="foo"
|
||||
export AWS_DEFAULT_REGION="foo"
|
||||
```
|
||||
|
||||
Edit your infrastructure file to customize it.
|
||||
You will probably need to put your cloud provider credentials,
|
||||
select region...
|
||||
|
||||
If you don't have the `aws` CLI installed, you will get a warning that it's a missing dependency. If you're not using AWS you can ignore this.
|
||||
|
||||
### Update/copy `settings/example.yaml`
|
||||
### Create your `settings` file
|
||||
|
||||
Then pass `settings/YOUR_WORKSHOP_NAME-settings.yaml` as an argument to `./workshopctl deploy`, `./workshopctl cards`, etc.
|
||||
Similarly, pick one of the files in `settings` and copy it
|
||||
to customize it.
|
||||
|
||||
./workshopctl cards 2016-09-28-00-33-bret settings/orchestration.yaml
|
||||
For instance:
|
||||
|
||||
```bash
|
||||
cp settings/example.yaml settings/myworkshop.yaml
|
||||
```
|
||||
|
||||
You're all set!
|
||||
|
||||
## `./workshopctl` Usage
|
||||
|
||||
@@ -65,7 +90,7 @@ Commands:
|
||||
ami Show the AMI that will be used for deployment
|
||||
amis List Ubuntu AMIs in the current region
|
||||
build Build the Docker image to run this program in a container
|
||||
cards Generate ready-to-print cards for a batch of VMs
|
||||
cards Generate ready-to-print cards for a group of VMs
|
||||
deploy Install Docker on a bunch of running VMs
|
||||
ec2quotas Check our EC2 quotas (max instances)
|
||||
help Show available commands
|
||||
@@ -73,14 +98,14 @@ ids List the instance IDs belonging to a given tag or token
|
||||
ips List the IP addresses of the VMs for a given tag or token
|
||||
kube Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
|
||||
kubetest Check that all notes are reporting as Ready
|
||||
list List available batches in the current region
|
||||
list List available groups in the current region
|
||||
opensg Open the default security group to ALL ingress traffic
|
||||
pull_images Pre-pull a bunch of Docker images
|
||||
retag Apply a new tag to a batch of VMs
|
||||
start Start a batch of VMs
|
||||
status List instance status for a given batch
|
||||
retag Apply a new tag to a group of VMs
|
||||
start Start a group of VMs
|
||||
status List instance status for a given group
|
||||
stop Stop (terminate, shutdown, kill, remove, destroy...) instances
|
||||
test Run tests (pre-flight checks) on a batch of VMs
|
||||
test Run tests (pre-flight checks) on a group of VMs
|
||||
wrap Run this program in a container
|
||||
```
|
||||
|
||||
@@ -92,24 +117,24 @@ wrap Run this program in a container
|
||||
- The `./workshopctl` script can be executed directly.
|
||||
- It will run locally if all its dependencies are fulfilled; otherwise it will run in the Docker container you created with `docker-compose build` (preparevms_prepare-vms).
|
||||
- During `start` it will add your default local SSH key to all instances under the `ubuntu` user.
|
||||
- During `deploy` it will create the `docker` user with password `training`, which is printing on the cards for students. For now, this is hard coded.
|
||||
- During `deploy` it will create the `docker` user with password `training`, which is printing on the cards for students. This can be configured with the `docker_user_password` property in the settings file.
|
||||
|
||||
### Example Steps to Launch a Batch of AWS Instances for a Workshop
|
||||
### Example Steps to Launch a group of AWS Instances for a Workshop
|
||||
|
||||
- Run `./workshopctl start N` Creates `N` EC2 instances
|
||||
- Run `./workshopctl start --infra infra/aws-us-east-2 --settings/myworkshop.yaml --count 60` to create 60 EC2 instances
|
||||
- Your local SSH key will be synced to instances under `ubuntu` user
|
||||
- AWS instances will be created and tagged based on date, and IP's stored in `prepare-vms/tags/`
|
||||
- Run `./workshopctl deploy TAG settings/somefile.yaml` to run `lib/postprep.py` via parallel-ssh
|
||||
- Run `./workshopctl deploy TAG` to run `lib/postprep.py` via parallel-ssh
|
||||
- If it errors or times out, you should be able to rerun
|
||||
- Requires good connection to run all the parallel SSH connections, up to 100 parallel (ProTip: create dedicated management instance in same AWS region where you run all these utils from)
|
||||
- Run `./workshopctl pull-images TAG` to pre-pull a bunch of Docker images to the instances
|
||||
- Run `./workshopctl cards TAG settings/somefile.yaml` generates PDF/HTML files to print and cut and hand out to students
|
||||
- Run `./workshopctl pull_images TAG` to pre-pull a bunch of Docker images to the instances
|
||||
- Run `./workshopctl cards TAG` generates PDF/HTML files to print and cut and hand out to students
|
||||
- *Have a great workshop*
|
||||
- Run `./workshopctl stop TAG` to terminate instances.
|
||||
|
||||
### Example Steps to Launch Azure Instances
|
||||
|
||||
- Install the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and authenticate with a valid account
|
||||
- Install the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and authenticate with a valid account (`az login`)
|
||||
- Customize `azuredeploy.parameters.json`
|
||||
- Required:
|
||||
- Provide the SSH public key you plan to use for instance configuration
|
||||
@@ -154,27 +179,16 @@ az group delete --resource-group workshop
|
||||
|
||||
### Example Steps to Configure Instances from a non-AWS Source
|
||||
|
||||
- Launch instances via your preferred method. You'll need to get the instance IPs and be able to ssh into them.
|
||||
- Set placeholder values for [AWS environment variable settings](#required-environment-variables).
|
||||
- Choose a tag. It could be an event name, datestamp, etc. Ensure you have created a directory for your tag: `prepare-vms/tags/<tag>/`
|
||||
- If you have not already generated a file with the IPs to be configured:
|
||||
- The file should be named `prepare-vms/tags/<tag>/ips.txt`
|
||||
- Format is one IP per line, no other info needed.
|
||||
- Ensure the settings file is as desired (especially the number of nodes): `prepare-vms/settings/kube101.yaml`
|
||||
- For a tag called `myworkshop`, configure instances: `workshopctl deploy myworkshop settings/kube101.yaml`
|
||||
- Optionally, configure Kubernetes clusters of the size in the settings: `workshopctl kube myworkshop`
|
||||
- Optionally, test your Kubernetes clusters. They may take a little time to become ready: `workshopctl kubetest myworkshop`
|
||||
- Generate cards to print and hand out: `workshopctl cards myworkshop settings/kube101.yaml`
|
||||
- Print the cards file: `prepare-vms/tags/myworkshop/ips.html`
|
||||
|
||||
|
||||
## Other Tools
|
||||
|
||||
### Deploying your SSH key to all the machines
|
||||
|
||||
- Make sure that you have SSH keys loaded (`ssh-add -l`).
|
||||
- Source `rc`.
|
||||
- Run `pcopykey`.
|
||||
- Copy `infra/example.generic` to `infra/generic`
|
||||
- Run `./workshopctl start --infra infra/generic --settings settings/...yaml`
|
||||
- Note the `prepare-vms/tags/TAG/` path that has been auto-created.
|
||||
- Launch instances via your preferred method. You'll need to get the instance IPs and be able to SSH into them.
|
||||
- Edit the file `prepare-vms/tags/TAG/ips.txt`, it should list the IP addresses of the VMs (one per line, without any comments or other info)
|
||||
- Continue deployment of cluster configuration with `./workshopctl deploy TAG`
|
||||
- Optionally, configure Kubernetes clusters of the size in the settings: workshopctl kube `TAG`
|
||||
- Optionally, test your Kubernetes clusters. They may take a little time to become ready: workshopctl kubetest `TAG`
|
||||
- Generate cards to print and hand out: workshopctl cards `TAG`
|
||||
- Print the cards file: prepare-vms/tags/`TAG`/ips.html
|
||||
|
||||
|
||||
## Even More Details
|
||||
@@ -187,7 +201,7 @@ To see which local key will be uploaded, run `ssh-add -l | grep RSA`.
|
||||
|
||||
#### Instance + tag creation
|
||||
|
||||
10 VMs will be started, with an automatically generated tag (timestamp + your username).
|
||||
The VMs will be started, with an automatically generated tag (timestamp + your username).
|
||||
|
||||
Your SSH key will be added to the `authorized_keys` of the ubuntu user.
|
||||
|
||||
@@ -195,25 +209,21 @@ Your SSH key will be added to the `authorized_keys` of the ubuntu user.
|
||||
|
||||
Following the creation of the VMs, a text file will be created containing a list of their IPs.
|
||||
|
||||
This ips.txt file will be created in the $TAG/ directory and a symlink will be placed in the working directory of the script.
|
||||
|
||||
If you create new VMs, the symlinked file will be overwritten.
|
||||
|
||||
#### Deployment
|
||||
|
||||
Instances can be deployed manually using the `deploy` command:
|
||||
|
||||
$ ./workshopctl deploy TAG settings/somefile.yaml
|
||||
$ ./workshopctl deploy TAG
|
||||
|
||||
The `postprep.py` file will be copied via parallel-ssh to all of the VMs and executed.
|
||||
|
||||
#### Pre-pull images
|
||||
|
||||
$ ./workshopctl pull-images TAG
|
||||
$ ./workshopctl pull_images TAG
|
||||
|
||||
#### Generate cards
|
||||
|
||||
$ ./workshopctl cards TAG settings/somefile.yaml
|
||||
$ ./workshopctl cards TAG
|
||||
|
||||
If you want to generate both HTML and PDF cards, install [wkhtmltopdf](https://wkhtmltopdf.org/downloads.html); without that installed, only HTML cards will be generated.
|
||||
|
||||
@@ -221,13 +231,11 @@ If you don't have `wkhtmltopdf` installed, you will get a warning that it is a m
|
||||
|
||||
#### List tags
|
||||
|
||||
$ ./workshopctl list
|
||||
$ ./workshopctl list infra/some-infra-file
|
||||
|
||||
#### List VMs
|
||||
$ ./workshopctl listall
|
||||
|
||||
$ ./workshopctl list TAG
|
||||
|
||||
This will print a human-friendly list containing some information about each instance.
|
||||
$ ./workshopctl tags
|
||||
|
||||
#### Stop and destroy VMs
|
||||
|
||||
|
||||
@@ -7,15 +7,6 @@ fi
|
||||
if id docker; then
|
||||
sudo userdel -r docker
|
||||
fi
|
||||
pip install --user awscli jinja2 pdfkit
|
||||
sudo apt-get install -y wkhtmltopdf xvfb
|
||||
tmux new-session \; send-keys "
|
||||
[ -f ~/.ssh/id_rsa ] || ssh-keygen
|
||||
|
||||
eval \$(ssh-agent)
|
||||
ssh-add
|
||||
Xvfb :0 &
|
||||
export DISPLAY=:0
|
||||
mkdir -p ~/www
|
||||
sudo docker run -d -p 80:80 -v \$HOME/www:/usr/share/nginx/html nginx
|
||||
"
|
||||
sudo apt-get update -q
|
||||
sudo apt-get install -qy jq python-pip wkhtmltopdf xvfb
|
||||
pip install --user awscli jinja2 pdfkit pssh
|
||||
|
||||
@@ -7,7 +7,6 @@ services:
|
||||
working_dir: /root/prepare-vms
|
||||
volumes:
|
||||
- $HOME/.aws/:/root/.aws/
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
- $SSH_AUTH_SOCK:$SSH_AUTH_SOCK
|
||||
- $PWD/:/root/prepare-vms/
|
||||
environment:
|
||||
|
||||
6
prepare-vms/infra/example.aws
Normal file
6
prepare-vms/infra/example.aws
Normal file
@@ -0,0 +1,6 @@
|
||||
INFRACLASS=aws
|
||||
# If you are using AWS to deploy, copy this file (e.g. to "aws", or "us-east-1")
|
||||
# and customize the variables below.
|
||||
export AWS_DEFAULT_REGION=us-east-1
|
||||
export AWS_ACCESS_KEY_ID=AKI...
|
||||
export AWS_SECRET_ACCESS_KEY=...
|
||||
2
prepare-vms/infra/example.generic
Normal file
2
prepare-vms/infra/example.generic
Normal file
@@ -0,0 +1,2 @@
|
||||
INFRACLASS=generic
|
||||
# This is for manual provisioning. No other variable or configuration is needed.
|
||||
9
prepare-vms/infra/example.openstack
Normal file
9
prepare-vms/infra/example.openstack
Normal file
@@ -0,0 +1,9 @@
|
||||
INFRACLASS=openstack
|
||||
# If you are using OpenStack, copy this file (e.g. to "openstack" or "enix")
|
||||
# and customize the variables below.
|
||||
export TF_VAR_user="jpetazzo"
|
||||
export TF_VAR_tenant="training"
|
||||
export TF_VAR_domain="Default"
|
||||
export TF_VAR_password="..."
|
||||
export TF_VAR_auth_url="https://api.r1.nxs.enix.io/v3"
|
||||
export TF_VAR_flavor="GP1.S"
|
||||
@@ -1,105 +0,0 @@
|
||||
aws_display_tags() {
|
||||
# Print all "Name" tags in our region with their instance count
|
||||
echo "[#] [Status] [Token] [Tag]" \
|
||||
| awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
|
||||
aws ec2 describe-instances \
|
||||
--query "Reservations[*].Instances[*].[State.Name,ClientToken,Tags[0].Value]" \
|
||||
| tr -d "\r" \
|
||||
| uniq -c \
|
||||
| sort -k 3 \
|
||||
| awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
|
||||
}
|
||||
|
||||
aws_get_tokens() {
|
||||
aws ec2 describe-instances --output text \
|
||||
--query 'Reservations[*].Instances[*].[ClientToken]' \
|
||||
| sort -u
|
||||
}
|
||||
|
||||
aws_display_instance_statuses_by_tag() {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
|
||||
IDS=$(aws ec2 describe-instances \
|
||||
--filters "Name=tag:Name,Values=$TAG" \
|
||||
--query "Reservations[*].Instances[*].InstanceId" | tr '\t' ' ')
|
||||
|
||||
aws ec2 describe-instance-status \
|
||||
--instance-ids $IDS \
|
||||
--query "InstanceStatuses[*].{ID:InstanceId,InstanceState:InstanceState.Name,InstanceStatus:InstanceStatus.Status,SystemStatus:SystemStatus.Status,Reachability:InstanceStatus.Status}" \
|
||||
--output table
|
||||
}
|
||||
|
||||
aws_display_instances_by_tag() {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
result=$(aws ec2 describe-instances --output table \
|
||||
--filter "Name=tag:Name,Values=$TAG" \
|
||||
--query "Reservations[*].Instances[*].[ \
|
||||
InstanceId, \
|
||||
State.Name, \
|
||||
Tags[0].Value, \
|
||||
PublicIpAddress, \
|
||||
InstanceType \
|
||||
]"
|
||||
)
|
||||
if [[ -z $result ]]; then
|
||||
die "No instances found with tag $TAG in region $AWS_DEFAULT_REGION."
|
||||
else
|
||||
echo "$result"
|
||||
fi
|
||||
}
|
||||
|
||||
aws_get_instance_ids_by_filter() {
|
||||
FILTER=$1
|
||||
aws ec2 describe-instances --filters $FILTER \
|
||||
--query Reservations[*].Instances[*].InstanceId \
|
||||
--output text | tr "\t" "\n" | tr -d "\r"
|
||||
}
|
||||
|
||||
aws_get_instance_ids_by_client_token() {
|
||||
TOKEN=$1
|
||||
need_tag $TOKEN
|
||||
aws_get_instance_ids_by_filter Name=client-token,Values=$TOKEN
|
||||
}
|
||||
|
||||
aws_get_instance_ids_by_tag() {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
aws_get_instance_ids_by_filter Name=tag:Name,Values=$TAG
|
||||
}
|
||||
|
||||
aws_get_instance_ips_by_tag() {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
aws ec2 describe-instances --filter "Name=tag:Name,Values=$TAG" \
|
||||
--output text \
|
||||
--query "Reservations[*].Instances[*].PublicIpAddress" \
|
||||
| tr "\t" "\n" \
|
||||
| sort -n -t . -k 1,1 -k 2,2 -k 3,3 -k 4,4 # sort IPs
|
||||
}
|
||||
|
||||
aws_kill_instances_by_tag() {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
IDS=$(aws_get_instance_ids_by_tag $TAG)
|
||||
if [ -z "$IDS" ]; then
|
||||
die "Invalid tag."
|
||||
fi
|
||||
|
||||
info "Deleting instances with tag $TAG."
|
||||
|
||||
aws ec2 terminate-instances --instance-ids $IDS \
|
||||
| grep ^TERMINATINGINSTANCES
|
||||
|
||||
info "Deleted instances with tag $TAG."
|
||||
}
|
||||
|
||||
aws_tag_instances() {
|
||||
OLD_TAG_OR_TOKEN=$1
|
||||
NEW_TAG=$2
|
||||
IDS=$(aws_get_instance_ids_by_client_token $OLD_TAG_OR_TOKEN)
|
||||
[[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
|
||||
IDS=$(aws_get_instance_ids_by_tag $OLD_TAG_OR_TOKEN)
|
||||
[[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
|
||||
}
|
||||
@@ -50,27 +50,41 @@ sep() {
|
||||
fi
|
||||
}
|
||||
|
||||
need_tag() {
|
||||
need_infra() {
|
||||
if [ -z "$1" ]; then
|
||||
die "Please specify infrastructure file. (e.g.: infra/aws)"
|
||||
fi
|
||||
if [ "$1" = "--infra" ]; then
|
||||
die "The infrastructure file should be passed directly to this command. Remove '--infra' and try again."
|
||||
fi
|
||||
if [ ! -f "$1" ]; then
|
||||
die "Infrastructure file $1 doesn't exist."
|
||||
fi
|
||||
. "$1"
|
||||
. "lib/infra/$INFRACLASS.sh"
|
||||
}
|
||||
|
||||
need_tag() {
|
||||
if [ -z "$TAG" ]; then
|
||||
die "Please specify a tag or token. To see available tags and tokens, run: $0 list"
|
||||
fi
|
||||
if [ ! -d "tags/$TAG" ]; then
|
||||
die "Tag $TAG not found (directory tags/$TAG does not exist)."
|
||||
fi
|
||||
for FILE in settings.yaml ips.txt infra.sh; do
|
||||
if [ ! -f "tags/$TAG/$FILE" ]; then
|
||||
warning "File tags/$TAG/$FILE not found."
|
||||
fi
|
||||
done
|
||||
. "tags/$TAG/infra.sh"
|
||||
. "lib/infra/$INFRACLASS.sh"
|
||||
}
|
||||
|
||||
need_settings() {
|
||||
if [ -z "$1" ]; then
|
||||
die "Please specify a settings file."
|
||||
elif [ ! -f "$1" ]; then
|
||||
die "Please specify a settings file. (e.g.: settings/kube101.yaml)"
|
||||
fi
|
||||
if [ ! -f "$1" ]; then
|
||||
die "Settings file $1 doesn't exist."
|
||||
fi
|
||||
}
|
||||
|
||||
need_ips_file() {
|
||||
IPS_FILE=$1
|
||||
if [ -z "$IPS_FILE" ]; then
|
||||
die "IPS_FILE not set."
|
||||
fi
|
||||
|
||||
if [ ! -s "$IPS_FILE" ]; then
|
||||
die "IPS_FILE $IPS_FILE not found. Please run: $0 ips <TAG>"
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -7,21 +7,11 @@ _cmd() {
|
||||
|
||||
_cmd help "Show available commands"
|
||||
_cmd_help() {
|
||||
printf "$(basename $0) - the orchestration workshop swiss army knife\n"
|
||||
printf "$(basename $0) - the container training swiss army knife\n"
|
||||
printf "Commands:"
|
||||
printf "%s" "$HELP" | sort
|
||||
}
|
||||
|
||||
_cmd amis "List Ubuntu AMIs in the current region"
|
||||
_cmd_amis() {
|
||||
find_ubuntu_ami -r $AWS_DEFAULT_REGION "$@"
|
||||
}
|
||||
|
||||
_cmd ami "Show the AMI that will be used for deployment"
|
||||
_cmd_ami() {
|
||||
find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 16.04 -t hvm:ebs -N -q
|
||||
}
|
||||
|
||||
_cmd build "Build the Docker image to run this program in a container"
|
||||
_cmd_build() {
|
||||
docker-compose build
|
||||
@@ -32,64 +22,53 @@ _cmd_wrap() {
|
||||
docker-compose run --rm workshopctl "$@"
|
||||
}
|
||||
|
||||
_cmd cards "Generate ready-to-print cards for a batch of VMs"
|
||||
_cmd cards "Generate ready-to-print cards for a group of VMs"
|
||||
_cmd_cards() {
|
||||
TAG=$1
|
||||
SETTINGS=$2
|
||||
need_tag $TAG
|
||||
need_settings $SETTINGS
|
||||
need_tag
|
||||
|
||||
# If you're not using AWS, populate the ips.txt file manually
|
||||
if [ ! -f tags/$TAG/ips.txt ]; then
|
||||
aws_get_instance_ips_by_tag $TAG >tags/$TAG/ips.txt
|
||||
fi
|
||||
|
||||
# Remove symlinks to old cards
|
||||
rm -f ips.html ips.pdf
|
||||
|
||||
# This will generate two files in the base dir: ips.pdf and ips.html
|
||||
python lib/ips-txt-to-html.py $SETTINGS
|
||||
|
||||
for f in ips.html ips.pdf; do
|
||||
# Remove old versions of cards if they exist
|
||||
rm -f tags/$TAG/$f
|
||||
|
||||
# Move the generated file and replace it with a symlink
|
||||
mv -f $f tags/$TAG/$f && ln -s tags/$TAG/$f $f
|
||||
done
|
||||
# This will process ips.txt to generate two files: ips.pdf and ips.html
|
||||
(
|
||||
cd tags/$TAG
|
||||
../../lib/ips-txt-to-html.py settings.yaml
|
||||
)
|
||||
|
||||
info "Cards created. You can view them with:"
|
||||
info "xdg-open ips.html ips.pdf (on Linux)"
|
||||
info "open ips.html ips.pdf (on MacOS)"
|
||||
info "xdg-open tags/$TAG/ips.html tags/$TAG/ips.pdf (on Linux)"
|
||||
info "open tags/$TAG/ips.html (on macOS)"
|
||||
}
|
||||
|
||||
_cmd deploy "Install Docker on a bunch of running VMs"
|
||||
_cmd_deploy() {
|
||||
TAG=$1
|
||||
SETTINGS=$2
|
||||
need_tag $TAG
|
||||
need_settings $SETTINGS
|
||||
link_tag $TAG
|
||||
count=$(wc -l ips.txt)
|
||||
need_tag
|
||||
|
||||
# wait until all hosts are reachable before trying to deploy
|
||||
info "Trying to reach $TAG instances..."
|
||||
while ! tag_is_reachable $TAG; do
|
||||
while ! tag_is_reachable; do
|
||||
>/dev/stderr echo -n "."
|
||||
sleep 2
|
||||
done
|
||||
>/dev/stderr echo ""
|
||||
|
||||
echo deploying > tags/$TAG/status
|
||||
sep "Deploying tag $TAG"
|
||||
pssh -I tee /tmp/settings.yaml <$SETTINGS
|
||||
|
||||
# Wait for cloudinit to be done
|
||||
pssh "
|
||||
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
|
||||
sleep 1
|
||||
done"
|
||||
|
||||
# Copy settings and install Python YAML parser
|
||||
pssh -I tee /tmp/settings.yaml <tags/$TAG/settings.yaml
|
||||
pssh "
|
||||
sudo apt-get update &&
|
||||
sudo apt-get install -y python-setuptools &&
|
||||
sudo easy_install pyyaml"
|
||||
sudo apt-get install -y python-yaml"
|
||||
|
||||
# Copy postprep.py to the remote machines, and execute it, feeding it the list of IP addresses
|
||||
pssh -I tee /tmp/postprep.py <lib/postprep.py
|
||||
pssh --timeout 900 --send-input "python /tmp/postprep.py >>/tmp/pp.out 2>>/tmp/pp.err" <ips.txt
|
||||
pssh --timeout 900 --send-input "python /tmp/postprep.py >>/tmp/pp.out 2>>/tmp/pp.err" <tags/$TAG/ips.txt
|
||||
|
||||
# Install docker-prompt script
|
||||
pssh -I sudo tee /usr/local/bin/docker-prompt <lib/docker-prompt
|
||||
@@ -117,14 +96,17 @@ _cmd_deploy() {
|
||||
fi"
|
||||
|
||||
sep "Deployed tag $TAG"
|
||||
echo deployed > tags/$TAG/status
|
||||
info "You may want to run one of the following commands:"
|
||||
info "$0 kube $TAG"
|
||||
info "$0 pull_images $TAG"
|
||||
info "$0 cards $TAG $SETTINGS"
|
||||
info "$0 cards $TAG"
|
||||
}
|
||||
|
||||
_cmd kube "Setup kubernetes clusters with kubeadm (must be run AFTER deploy)"
|
||||
_cmd_kube() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
# Install packages
|
||||
pssh --timeout 200 "
|
||||
@@ -132,15 +114,15 @@ _cmd_kube() {
|
||||
sudo apt-key add - &&
|
||||
echo deb http://apt.kubernetes.io/ kubernetes-xenial main |
|
||||
sudo tee /etc/apt/sources.list.d/kubernetes.list"
|
||||
pssh "
|
||||
pssh --timeout 200 "
|
||||
sudo apt-get update -q &&
|
||||
sudo apt-get install -qy kubelet kubeadm kubectl
|
||||
sudo apt-get install -qy kubelet kubeadm kubectl &&
|
||||
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl"
|
||||
|
||||
# Initialize kube master
|
||||
pssh --timeout 200 "
|
||||
if grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/admin.conf ]; then
|
||||
kubeadm token generate > /tmp/token
|
||||
kubeadm token generate > /tmp/token &&
|
||||
sudo kubeadm init --token \$(cat /tmp/token)
|
||||
fi"
|
||||
|
||||
@@ -157,34 +139,80 @@ _cmd_kube() {
|
||||
# Install weave as the pod network
|
||||
pssh "
|
||||
if grep -q node1 /tmp/node; then
|
||||
kubever=\$(kubectl version | base64 | tr -d '\n')
|
||||
kubever=\$(kubectl version | base64 | tr -d '\n') &&
|
||||
kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=\$kubever
|
||||
fi"
|
||||
|
||||
# Join the other nodes to the cluster
|
||||
pssh --timeout 200 "
|
||||
if ! grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
|
||||
TOKEN=\$(ssh -o StrictHostKeyChecking=no node1 cat /tmp/token)
|
||||
TOKEN=\$(ssh -o StrictHostKeyChecking=no node1 cat /tmp/token) &&
|
||||
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN node1:6443
|
||||
fi"
|
||||
|
||||
# Install kubectx and kubens
|
||||
pssh "
|
||||
[ -d kubectx ] || git clone https://github.com/ahmetb/kubectx &&
|
||||
sudo ln -sf /home/ubuntu/kubectx/kubectx /usr/local/bin/kctx &&
|
||||
sudo ln -sf /home/ubuntu/kubectx/kubens /usr/local/bin/kns &&
|
||||
sudo cp /home/ubuntu/kubectx/completion/*.bash /etc/bash_completion.d &&
|
||||
[ -d kube-ps1 ] || git clone https://github.com/jonmosco/kube-ps1 &&
|
||||
sudo -u docker sed -i s/docker-prompt/kube_ps1/ /home/docker/.bashrc &&
|
||||
sudo -u docker tee -a /home/docker/.bashrc <<EOF
|
||||
. /home/ubuntu/kube-ps1/kube-ps1.sh
|
||||
KUBE_PS1_PREFIX=""
|
||||
KUBE_PS1_SUFFIX=""
|
||||
KUBE_PS1_SYMBOL_ENABLE="false"
|
||||
KUBE_PS1_CTX_COLOR="green"
|
||||
KUBE_PS1_NS_COLOR="green"
|
||||
EOF"
|
||||
|
||||
# Install stern
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/stern ]; then
|
||||
##VERSION##
|
||||
sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.10.0/stern_linux_amd64 &&
|
||||
sudo chmod +x /usr/local/bin/stern &&
|
||||
stern --completion bash | sudo tee /etc/bash_completion.d/stern
|
||||
fi"
|
||||
|
||||
# Install helm
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/helm ]; then
|
||||
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | sudo bash &&
|
||||
helm completion bash | sudo tee /etc/bash_completion.d/helm
|
||||
fi"
|
||||
|
||||
sep "Done"
|
||||
}
|
||||
|
||||
_cmd kubetest "Check that all notes are reporting as Ready"
|
||||
_cmd kubereset "Wipe out Kubernetes configuration on all nodes"
|
||||
_cmd_kubereset() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
pssh "sudo kubeadm reset --force"
|
||||
}
|
||||
|
||||
_cmd kubetest "Check that all nodes are reporting as Ready"
|
||||
_cmd_kubetest() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
# There are way too many backslashes in the command below.
|
||||
# Feel free to make that better ♥
|
||||
pssh "
|
||||
set -e
|
||||
[ -f /tmp/node ]
|
||||
if grep -q node1 /tmp/node; then
|
||||
which kubectl
|
||||
for NODE in \$(awk /\ node/\ {print\ \\\$2} /etc/hosts); do
|
||||
echo \$NODE ; kubectl get nodes | grep -w \$NODE | grep -w Ready
|
||||
done
|
||||
fi"
|
||||
}
|
||||
|
||||
_cmd ids "List the instance IDs belonging to a given tag or token"
|
||||
_cmd ids "(FIXME) List the instance IDs belonging to a given tag or token"
|
||||
_cmd_ids() {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
@@ -197,248 +225,264 @@ _cmd_ids() {
|
||||
aws_get_instance_ids_by_client_token $TAG
|
||||
}
|
||||
|
||||
_cmd ips "List the IP addresses of the VMs for a given tag or token"
|
||||
_cmd_ips() {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
mkdir -p tags/$TAG
|
||||
aws_get_instance_ips_by_tag $TAG | tee tags/$TAG/ips.txt
|
||||
link_tag $TAG
|
||||
}
|
||||
|
||||
_cmd list "List available batches in the current region"
|
||||
_cmd list "List available groups for a given infrastructure"
|
||||
_cmd_list() {
|
||||
info "Listing batches in region $AWS_DEFAULT_REGION:"
|
||||
aws_display_tags
|
||||
need_infra $1
|
||||
infra_list
|
||||
}
|
||||
|
||||
_cmd status "List instance status for a given batch"
|
||||
_cmd_status() {
|
||||
info "Using region $AWS_DEFAULT_REGION."
|
||||
_cmd listall "List VMs running on all configured infrastructures"
|
||||
_cmd_listall() {
|
||||
for infra in infra/*; do
|
||||
case $infra in
|
||||
infra/example.*)
|
||||
;;
|
||||
*)
|
||||
info "Listing infrastructure $infra:"
|
||||
need_infra $infra
|
||||
infra_list
|
||||
;;
|
||||
esac
|
||||
done
|
||||
}
|
||||
|
||||
_cmd netfix "Disable GRO and run a pinger job on the VMs"
|
||||
_cmd_netfix () {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
describe_tag $TAG
|
||||
tag_is_reachable $TAG
|
||||
info "You may be interested in running one of the following commands:"
|
||||
info "$0 ips $TAG"
|
||||
info "$0 deploy $TAG <settings/somefile.yaml>"
|
||||
need_tag
|
||||
|
||||
pssh "
|
||||
sudo ethtool -K ens3 gro off
|
||||
sudo tee /root/pinger.service <<EOF
|
||||
[Unit]
|
||||
Description=pinger
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
[Service]
|
||||
WorkingDirectory=/
|
||||
ExecStart=/bin/ping -w60 1.1
|
||||
User=nobody
|
||||
Group=nogroup
|
||||
Restart=always
|
||||
EOF
|
||||
sudo systemctl enable /root/pinger.service
|
||||
sudo systemctl start pinger"
|
||||
}
|
||||
|
||||
_cmd opensg "Open the default security group to ALL ingress traffic"
|
||||
_cmd_opensg() {
|
||||
aws ec2 authorize-security-group-ingress \
|
||||
--group-name default \
|
||||
--protocol icmp \
|
||||
--port -1 \
|
||||
--cidr 0.0.0.0/0
|
||||
need_infra $1
|
||||
infra_opensg
|
||||
}
|
||||
|
||||
aws ec2 authorize-security-group-ingress \
|
||||
--group-name default \
|
||||
--protocol udp \
|
||||
--port 0-65535 \
|
||||
--cidr 0.0.0.0/0
|
||||
_cmd pssh "Run an arbitrary command on all nodes"
|
||||
_cmd_pssh() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
shift
|
||||
|
||||
aws ec2 authorize-security-group-ingress \
|
||||
--group-name default \
|
||||
--protocol tcp \
|
||||
--port 0-65535 \
|
||||
--cidr 0.0.0.0/0
|
||||
pssh "$@"
|
||||
}
|
||||
|
||||
_cmd pull_images "Pre-pull a bunch of Docker images"
|
||||
_cmd_pull_images() {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
pull_tag $TAG
|
||||
need_tag
|
||||
pull_tag
|
||||
}
|
||||
|
||||
_cmd retag "Apply a new tag to a batch of VMs"
|
||||
_cmd quotas "Check our infrastructure quotas (max instances)"
|
||||
_cmd_quotas() {
|
||||
need_infra $1
|
||||
infra_quotas
|
||||
}
|
||||
|
||||
_cmd retag "(FIXME) Apply a new tag to a group of VMs"
|
||||
_cmd_retag() {
|
||||
OLDTAG=$1
|
||||
NEWTAG=$2
|
||||
need_tag $OLDTAG
|
||||
TAG=$OLDTAG
|
||||
need_tag
|
||||
if [[ -z "$NEWTAG" ]]; then
|
||||
die "You must specify a new tag to apply."
|
||||
fi
|
||||
aws_tag_instances $OLDTAG $NEWTAG
|
||||
}
|
||||
|
||||
_cmd start "Start a batch of VMs"
|
||||
_cmd start "Start a group of VMs"
|
||||
_cmd_start() {
|
||||
# Number of instances to create
|
||||
COUNT=$1
|
||||
# Optional settings file (to carry on with deployment)
|
||||
SETTINGS=$2
|
||||
|
||||
while [ ! -z "$*" ]; do
|
||||
case "$1" in
|
||||
--infra) INFRA=$2; shift 2;;
|
||||
--settings) SETTINGS=$2; shift 2;;
|
||||
--count) COUNT=$2; shift 2;;
|
||||
--tag) TAG=$2; shift 2;;
|
||||
*) die "Unrecognized parameter: $1."
|
||||
esac
|
||||
done
|
||||
|
||||
if [ -z "$INFRA" ]; then
|
||||
die "Please add --infra flag to specify which infrastructure file to use."
|
||||
fi
|
||||
if [ -z "$SETTINGS" ]; then
|
||||
die "Please add --settings flag to specify which settings file to use."
|
||||
fi
|
||||
if [ -z "$COUNT" ]; then
|
||||
die "Indicate number of instances to start."
|
||||
COUNT=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
|
||||
warning "No --count option was specified. Using value from settings file ($COUNT)."
|
||||
fi
|
||||
|
||||
# Check that the specified settings and infrastructure are valid.
|
||||
need_settings $SETTINGS
|
||||
need_infra $INFRA
|
||||
|
||||
# Print our AWS username, to ease the pain of credential-juggling
|
||||
greet
|
||||
|
||||
# Upload our SSH keys to AWS if needed, to be added to each VM's authorized_keys
|
||||
key_name=$(sync_keys)
|
||||
|
||||
AMI=$(_cmd_ami) # Retrieve the AWS image ID
|
||||
if [ -z "$AMI" ]; then
|
||||
die "I could not find which AMI to use in this region. Try another region?"
|
||||
if [ -z "$TAG" ]; then
|
||||
TAG=$(make_tag)
|
||||
fi
|
||||
TOKEN=$(get_token) # generate a timestamp token for this batch of VMs
|
||||
AWS_KEY_NAME=$(make_key_name)
|
||||
|
||||
sep "Starting instances"
|
||||
info " Count: $COUNT"
|
||||
info " Region: $AWS_DEFAULT_REGION"
|
||||
info " Token/tag: $TOKEN"
|
||||
info " AMI: $AMI"
|
||||
info " Key name: $AWS_KEY_NAME"
|
||||
result=$(aws ec2 run-instances \
|
||||
--key-name $AWS_KEY_NAME \
|
||||
--count $COUNT \
|
||||
--instance-type ${AWS_INSTANCE_TYPE-t2.medium} \
|
||||
--client-token $TOKEN \
|
||||
--image-id $AMI)
|
||||
reservation_id=$(echo "$result" | head -1 | awk '{print $2}')
|
||||
info "Reservation ID: $reservation_id"
|
||||
sep
|
||||
|
||||
# if instance creation succeeded, we should have some IDs
|
||||
IDS=$(aws_get_instance_ids_by_client_token $TOKEN)
|
||||
if [ -z "$IDS" ]; then
|
||||
die "Instance creation failed."
|
||||
fi
|
||||
|
||||
# Tag these new instances with a tag that is the same as the token
|
||||
TAG=$TOKEN
|
||||
aws_tag_instances $TOKEN $TAG
|
||||
|
||||
wait_until_tag_is_running $TAG $COUNT
|
||||
mkdir -p tags/$TAG
|
||||
ln -s ../../$INFRA tags/$TAG/infra.sh
|
||||
ln -s ../../$SETTINGS tags/$TAG/settings.yaml
|
||||
echo creating > tags/$TAG/status
|
||||
|
||||
infra_start $COUNT
|
||||
sep
|
||||
info "Successfully created $COUNT instances with tag $TAG"
|
||||
sep
|
||||
echo created > tags/$TAG/status
|
||||
|
||||
mkdir -p tags/$TAG
|
||||
IPS=$(aws_get_instance_ips_by_tag $TAG)
|
||||
echo "$IPS" >tags/$TAG/ips.txt
|
||||
link_tag $TAG
|
||||
if [ -n "$SETTINGS" ]; then
|
||||
_cmd_deploy $TAG $SETTINGS
|
||||
else
|
||||
info "To deploy or kill these instances, run one of the following:"
|
||||
info "$0 deploy $TAG <settings/somefile.yaml>"
|
||||
info "$0 stop $TAG"
|
||||
fi
|
||||
}
|
||||
|
||||
_cmd ec2quotas "Check our EC2 quotas (max instances)"
|
||||
_cmd_ec2quotas() {
|
||||
greet
|
||||
|
||||
max_instances=$(aws ec2 describe-account-attributes \
|
||||
--attribute-names max-instances \
|
||||
--query 'AccountAttributes[*][AttributeValues]')
|
||||
info "In the current region ($AWS_DEFAULT_REGION) you can deploy up to $max_instances instances."
|
||||
|
||||
# Print list of AWS EC2 regions, highlighting ours ($AWS_DEFAULT_REGION) in the list
|
||||
# If our $AWS_DEFAULT_REGION is not valid, the error message will be pretty descriptive:
|
||||
# Could not connect to the endpoint URL: "https://ec2.foo.amazonaws.com/"
|
||||
info "Available regions:"
|
||||
aws ec2 describe-regions | awk '{print $3}' | grep --color=auto $AWS_DEFAULT_REGION -C50
|
||||
info "To deploy Docker on these instances, you can run:"
|
||||
info "$0 deploy $TAG"
|
||||
info "To terminate these instances, you can run:"
|
||||
info "$0 stop $TAG"
|
||||
}
|
||||
|
||||
_cmd stop "Stop (terminate, shutdown, kill, remove, destroy...) instances"
|
||||
_cmd_stop() {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
aws_kill_instances_by_tag $TAG
|
||||
need_tag
|
||||
infra_stop
|
||||
echo stopped > tags/$TAG/status
|
||||
}
|
||||
|
||||
_cmd test "Run tests (pre-flight checks) on a batch of VMs"
|
||||
_cmd tags "List groups of VMs known locally"
|
||||
_cmd_tags() {
|
||||
(
|
||||
cd tags
|
||||
echo "[#] [Status] [Tag] [Infra]" \
|
||||
| awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
|
||||
for tag in *; do
|
||||
if [ -f $tag/ips.txt ]; then
|
||||
count="$(wc -l < $tag/ips.txt)"
|
||||
else
|
||||
count="?"
|
||||
fi
|
||||
if [ -f $tag/status ]; then
|
||||
status="$(cat $tag/status)"
|
||||
else
|
||||
status="?"
|
||||
fi
|
||||
if [ -f $tag/infra.sh ]; then
|
||||
infra="$(basename $(readlink $tag/infra.sh))"
|
||||
else
|
||||
infra="?"
|
||||
fi
|
||||
echo "$count $status $tag $infra" \
|
||||
| awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
|
||||
done
|
||||
)
|
||||
}
|
||||
|
||||
_cmd test "Run tests (pre-flight checks) on a group of VMs"
|
||||
_cmd_test() {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
test_tag $TAG
|
||||
need_tag
|
||||
test_tag
|
||||
}
|
||||
|
||||
###
|
||||
_cmd helmprom "Install Helm and Prometheus"
|
||||
_cmd_helmprom() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
pssh "
|
||||
if grep -q node1 /tmp/node; then
|
||||
kubectl -n kube-system get serviceaccount helm ||
|
||||
kubectl -n kube-system create serviceaccount helm
|
||||
helm init --service-account helm
|
||||
kubectl get clusterrolebinding helm-can-do-everything ||
|
||||
kubectl create clusterrolebinding helm-can-do-everything \
|
||||
--clusterrole=cluster-admin \
|
||||
--serviceaccount=kube-system:helm
|
||||
helm upgrade --install prometheus stable/prometheus \
|
||||
--namespace kube-system \
|
||||
--set server.service.type=NodePort \
|
||||
--set server.service.nodePort=30090 \
|
||||
--set server.persistentVolume.enabled=false \
|
||||
--set alertmanager.enabled=false
|
||||
fi"
|
||||
}
|
||||
|
||||
# Sometimes, weave fails to come up on some nodes.
|
||||
# Symptom: the pods on a node are unreachable (they don't even ping).
|
||||
# Remedy: wipe out Weave state and delete weave pod on that node.
|
||||
# Specifically, identify the weave pod that is defective, then:
|
||||
# kubectl -n kube-system exec weave-net-XXXXX -c weave rm /weavedb/weave-netdata.db
|
||||
# kubectl -n kube-system delete pod weave-net-XXXXX
|
||||
_cmd weavetest "Check that weave seems properly setup"
|
||||
_cmd_weavetest() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
pssh "
|
||||
kubectl -n kube-system get pods -o name | grep weave | cut -d/ -f2 |
|
||||
xargs -I POD kubectl -n kube-system exec POD -c weave -- \
|
||||
sh -c \"./weave --local status | grep Connections | grep -q ' 1 failed' || ! echo POD \""
|
||||
}
|
||||
|
||||
greet() {
|
||||
IAMUSER=$(aws iam get-user --query 'User.UserName')
|
||||
info "Hello! You seem to be UNIX user $USER, and IAM user $IAMUSER."
|
||||
}
|
||||
|
||||
link_tag() {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
IPS_FILE=tags/$TAG/ips.txt
|
||||
need_ips_file $IPS_FILE
|
||||
ln -sf $IPS_FILE ips.txt
|
||||
}
|
||||
|
||||
pull_tag() {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
link_tag $TAG
|
||||
if [ ! -s $IPS_FILE ]; then
|
||||
die "Nonexistent or empty IPs file $IPS_FILE."
|
||||
fi
|
||||
|
||||
# Pre-pull a bunch of images
|
||||
pssh --timeout 900 'for I in \
|
||||
debian:latest \
|
||||
ubuntu:latest \
|
||||
fedora:latest \
|
||||
centos:latest \
|
||||
postgres \
|
||||
redis \
|
||||
training/namer \
|
||||
nathanleclaire/redisonrails; do
|
||||
debian:latest \
|
||||
ubuntu:latest \
|
||||
fedora:latest \
|
||||
centos:latest \
|
||||
elasticsearch:2 \
|
||||
postgres \
|
||||
redis \
|
||||
alpine \
|
||||
registry \
|
||||
nicolaka/netshoot \
|
||||
jpetazzo/trainingwheels \
|
||||
golang \
|
||||
training/namer \
|
||||
dockercoins/hasher \
|
||||
dockercoins/rng \
|
||||
dockercoins/webui \
|
||||
dockercoins/worker \
|
||||
logstash \
|
||||
prom/node-exporter \
|
||||
google/cadvisor \
|
||||
dockersamples/visualizer \
|
||||
nathanleclaire/redisonrails; do
|
||||
sudo -u docker docker pull $I
|
||||
done'
|
||||
|
||||
info "Finished pulling images for $TAG."
|
||||
info "You may now want to run:"
|
||||
info "$0 cards $TAG <settings/somefile.yaml>"
|
||||
}
|
||||
|
||||
wait_until_tag_is_running() {
|
||||
max_retry=50
|
||||
TAG=$1
|
||||
COUNT=$2
|
||||
i=0
|
||||
done_count=0
|
||||
while [[ $done_count -lt $COUNT ]]; do
|
||||
let "i += 1"
|
||||
info "$(printf "%d/%d instances online" $done_count $COUNT)"
|
||||
done_count=$(aws ec2 describe-instances \
|
||||
--filters "Name=instance-state-name,Values=running" \
|
||||
"Name=tag:Name,Values=$TAG" \
|
||||
--query "Reservations[*].Instances[*].State.Name" \
|
||||
| tr "\t" "\n" \
|
||||
| wc -l)
|
||||
|
||||
if [[ $i -gt $max_retry ]]; then
|
||||
die "Timed out while waiting for instance creation (after $max_retry retries)"
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
}
|
||||
|
||||
tag_is_reachable() {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
link_tag $TAG
|
||||
pssh -t 5 true 2>&1 >/dev/null
|
||||
}
|
||||
|
||||
test_tag() {
|
||||
TAG=$1
|
||||
ips_file=tags/$TAG/ips.txt
|
||||
info "Picking a random IP address in $ips_file to run tests."
|
||||
n=$((1 + $RANDOM % $(wc -l <$ips_file)))
|
||||
ip=$(head -n $n $ips_file | tail -n 1)
|
||||
ip=$(shuf -n1 $ips_file)
|
||||
test_vm $ip
|
||||
info "Tests complete."
|
||||
}
|
||||
@@ -514,17 +558,9 @@ sync_keys() {
|
||||
fi
|
||||
}
|
||||
|
||||
get_token() {
|
||||
make_tag() {
|
||||
if [ -z $USER ]; then
|
||||
export USER=anonymous
|
||||
fi
|
||||
date +%Y-%m-%d-%H-%M-$USER
|
||||
}
|
||||
|
||||
describe_tag() {
|
||||
# Display instance details and reachability/status information
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
aws_display_instances_by_tag $TAG
|
||||
aws_display_instance_statuses_by_tag $TAG
|
||||
}
|
||||
|
||||
26
prepare-vms/lib/infra.sh
Normal file
26
prepare-vms/lib/infra.sh
Normal file
@@ -0,0 +1,26 @@
|
||||
# Default stub functions for infrastructure libraries.
|
||||
# When loading an infrastructure library, these functions will be overridden.
|
||||
|
||||
infra_list() {
|
||||
warning "infra_list is unsupported on $INFRACLASS."
|
||||
}
|
||||
|
||||
infra_quotas() {
|
||||
warning "infra_quotas is unsupported on $INFRACLASS."
|
||||
}
|
||||
|
||||
infra_start() {
|
||||
warning "infra_start is unsupported on $INFRACLASS."
|
||||
}
|
||||
|
||||
infra_stop() {
|
||||
warning "infra_stop is unsupported on $INFRACLASS."
|
||||
}
|
||||
|
||||
infra_quotas() {
|
||||
warning "infra_quotas is unsupported on $INFRACLASS."
|
||||
}
|
||||
|
||||
infra_opensg() {
|
||||
warning "infra_opensg is unsupported on $INFRACLASS."
|
||||
}
|
||||
206
prepare-vms/lib/infra/aws.sh
Normal file
206
prepare-vms/lib/infra/aws.sh
Normal file
@@ -0,0 +1,206 @@
|
||||
infra_list() {
|
||||
aws_display_tags
|
||||
}
|
||||
|
||||
infra_quotas() {
|
||||
greet
|
||||
|
||||
max_instances=$(aws ec2 describe-account-attributes \
|
||||
--attribute-names max-instances \
|
||||
--query 'AccountAttributes[*][AttributeValues]')
|
||||
info "In the current region ($AWS_DEFAULT_REGION) you can deploy up to $max_instances instances."
|
||||
|
||||
# Print list of AWS EC2 regions, highlighting ours ($AWS_DEFAULT_REGION) in the list
|
||||
# If our $AWS_DEFAULT_REGION is not valid, the error message will be pretty descriptive:
|
||||
# Could not connect to the endpoint URL: "https://ec2.foo.amazonaws.com/"
|
||||
info "Available regions:"
|
||||
aws ec2 describe-regions | awk '{print $3}' | grep --color=auto $AWS_DEFAULT_REGION -C50
|
||||
}
|
||||
|
||||
infra_start() {
|
||||
COUNT=$1
|
||||
|
||||
# Print our AWS username, to ease the pain of credential-juggling
|
||||
greet
|
||||
|
||||
# Upload our SSH keys to AWS if needed, to be added to each VM's authorized_keys
|
||||
key_name=$(sync_keys)
|
||||
|
||||
AMI=$(aws_get_ami) # Retrieve the AWS image ID
|
||||
if [ -z "$AMI" ]; then
|
||||
die "I could not find which AMI to use in this region. Try another region?"
|
||||
fi
|
||||
AWS_KEY_NAME=$(make_key_name)
|
||||
|
||||
sep "Starting instances"
|
||||
info " Count: $COUNT"
|
||||
info " Region: $AWS_DEFAULT_REGION"
|
||||
info " Token/tag: $TAG"
|
||||
info " AMI: $AMI"
|
||||
info " Key name: $AWS_KEY_NAME"
|
||||
result=$(aws ec2 run-instances \
|
||||
--key-name $AWS_KEY_NAME \
|
||||
--count $COUNT \
|
||||
--instance-type ${AWS_INSTANCE_TYPE-t2.medium} \
|
||||
--client-token $TAG \
|
||||
--block-device-mapping 'DeviceName=/dev/sda1,Ebs={VolumeSize=20}' \
|
||||
--image-id $AMI)
|
||||
reservation_id=$(echo "$result" | head -1 | awk '{print $2}')
|
||||
info "Reservation ID: $reservation_id"
|
||||
sep
|
||||
|
||||
# if instance creation succeeded, we should have some IDs
|
||||
IDS=$(aws_get_instance_ids_by_client_token $TAG)
|
||||
if [ -z "$IDS" ]; then
|
||||
die "Instance creation failed."
|
||||
fi
|
||||
|
||||
# Tag these new instances with a tag that is the same as the token
|
||||
aws_tag_instances $TAG $TAG
|
||||
|
||||
# Wait until EC2 API tells us that the instances are running
|
||||
wait_until_tag_is_running $TAG $COUNT
|
||||
|
||||
aws_get_instance_ips_by_tag $TAG > tags/$TAG/ips.txt
|
||||
}
|
||||
|
||||
infra_stop() {
|
||||
aws_kill_instances_by_tag
|
||||
}
|
||||
|
||||
infra_opensg() {
|
||||
aws ec2 authorize-security-group-ingress \
|
||||
--group-name default \
|
||||
--protocol icmp \
|
||||
--port -1 \
|
||||
--cidr 0.0.0.0/0
|
||||
|
||||
aws ec2 authorize-security-group-ingress \
|
||||
--group-name default \
|
||||
--protocol udp \
|
||||
--port 0-65535 \
|
||||
--cidr 0.0.0.0/0
|
||||
|
||||
aws ec2 authorize-security-group-ingress \
|
||||
--group-name default \
|
||||
--protocol tcp \
|
||||
--port 0-65535 \
|
||||
--cidr 0.0.0.0/0
|
||||
}
|
||||
|
||||
wait_until_tag_is_running() {
|
||||
max_retry=50
|
||||
i=0
|
||||
done_count=0
|
||||
while [[ $done_count -lt $COUNT ]]; do
|
||||
let "i += 1"
|
||||
info "$(printf "%d/%d instances online" $done_count $COUNT)"
|
||||
done_count=$(aws ec2 describe-instances \
|
||||
--filters "Name=tag:Name,Values=$TAG" \
|
||||
"Name=instance-state-name,Values=running" \
|
||||
--query "length(Reservations[].Instances[])")
|
||||
if [[ $i -gt $max_retry ]]; then
|
||||
die "Timed out while waiting for instance creation (after $max_retry retries)"
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
}
|
||||
|
||||
aws_display_tags() {
|
||||
# Print all "Name" tags in our region with their instance count
|
||||
echo "[#] [Status] [Token] [Tag]" \
|
||||
| awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
|
||||
aws ec2 describe-instances \
|
||||
--query "Reservations[*].Instances[*].[State.Name,ClientToken,Tags[0].Value]" \
|
||||
| tr -d "\r" \
|
||||
| uniq -c \
|
||||
| sort -k 3 \
|
||||
| awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
|
||||
}
|
||||
|
||||
aws_get_tokens() {
|
||||
aws ec2 describe-instances --output text \
|
||||
--query 'Reservations[*].Instances[*].[ClientToken]' \
|
||||
| sort -u
|
||||
}
|
||||
|
||||
aws_display_instance_statuses_by_tag() {
|
||||
IDS=$(aws ec2 describe-instances \
|
||||
--filters "Name=tag:Name,Values=$TAG" \
|
||||
--query "Reservations[*].Instances[*].InstanceId" | tr '\t' ' ')
|
||||
|
||||
aws ec2 describe-instance-status \
|
||||
--instance-ids $IDS \
|
||||
--query "InstanceStatuses[*].{ID:InstanceId,InstanceState:InstanceState.Name,InstanceStatus:InstanceStatus.Status,SystemStatus:SystemStatus.Status,Reachability:InstanceStatus.Status}" \
|
||||
--output table
|
||||
}
|
||||
|
||||
aws_display_instances_by_tag() {
|
||||
result=$(aws ec2 describe-instances --output table \
|
||||
--filter "Name=tag:Name,Values=$TAG" \
|
||||
--query "Reservations[*].Instances[*].[ \
|
||||
InstanceId, \
|
||||
State.Name, \
|
||||
Tags[0].Value, \
|
||||
PublicIpAddress, \
|
||||
InstanceType \
|
||||
]"
|
||||
)
|
||||
if [[ -z $result ]]; then
|
||||
die "No instances found with tag $TAG in region $AWS_DEFAULT_REGION."
|
||||
else
|
||||
echo "$result"
|
||||
fi
|
||||
}
|
||||
|
||||
aws_get_instance_ids_by_filter() {
|
||||
FILTER=$1
|
||||
aws ec2 describe-instances --filters $FILTER \
|
||||
--query Reservations[*].Instances[*].InstanceId \
|
||||
--output text | tr "\t" "\n" | tr -d "\r"
|
||||
}
|
||||
|
||||
aws_get_instance_ids_by_client_token() {
|
||||
TOKEN=$1
|
||||
aws_get_instance_ids_by_filter Name=client-token,Values=$TOKEN
|
||||
}
|
||||
|
||||
aws_get_instance_ids_by_tag() {
|
||||
aws_get_instance_ids_by_filter Name=tag:Name,Values=$TAG
|
||||
}
|
||||
|
||||
aws_get_instance_ips_by_tag() {
|
||||
aws ec2 describe-instances --filter "Name=tag:Name,Values=$TAG" \
|
||||
--output text \
|
||||
--query "Reservations[*].Instances[*].PublicIpAddress" \
|
||||
| tr "\t" "\n" \
|
||||
| sort -n -t . -k 1,1 -k 2,2 -k 3,3 -k 4,4 # sort IPs
|
||||
}
|
||||
|
||||
aws_kill_instances_by_tag() {
|
||||
IDS=$(aws_get_instance_ids_by_tag $TAG)
|
||||
if [ -z "$IDS" ]; then
|
||||
die "Invalid tag."
|
||||
fi
|
||||
|
||||
info "Deleting instances with tag $TAG."
|
||||
|
||||
aws ec2 terminate-instances --instance-ids $IDS \
|
||||
| grep ^TERMINATINGINSTANCES
|
||||
|
||||
info "Deleted instances with tag $TAG."
|
||||
}
|
||||
|
||||
aws_tag_instances() {
|
||||
OLD_TAG_OR_TOKEN=$1
|
||||
NEW_TAG=$2
|
||||
IDS=$(aws_get_instance_ids_by_client_token $OLD_TAG_OR_TOKEN)
|
||||
[[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
|
||||
IDS=$(aws_get_instance_ids_by_tag $OLD_TAG_OR_TOKEN)
|
||||
[[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
|
||||
}
|
||||
|
||||
aws_get_ami() {
|
||||
##VERSION##
|
||||
find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 18.04 -t hvm:ebs -N -q
|
||||
}
|
||||
8
prepare-vms/lib/infra/generic.sh
Normal file
8
prepare-vms/lib/infra/generic.sh
Normal file
@@ -0,0 +1,8 @@
|
||||
infra_start() {
|
||||
COUNT=$1
|
||||
info "You should now run your provisioning commands for $COUNT machines."
|
||||
info "Note: no machines have been automatically created!"
|
||||
info "Once done, put the list of IP addresses in tags/$TAG/ips.txt"
|
||||
info "(one IP address per line, without any comments or extra lines)."
|
||||
touch tags/$TAG/ips.txt
|
||||
}
|
||||
20
prepare-vms/lib/infra/openstack.sh
Normal file
20
prepare-vms/lib/infra/openstack.sh
Normal file
@@ -0,0 +1,20 @@
|
||||
infra_start() {
|
||||
COUNT=$1
|
||||
|
||||
cp terraform/*.tf tags/$TAG
|
||||
(
|
||||
cd tags/$TAG
|
||||
terraform init
|
||||
echo prefix = \"$TAG\" >> terraform.tfvars
|
||||
echo count = \"$COUNT\" >> terraform.tfvars
|
||||
terraform apply -auto-approve
|
||||
terraform output ip_addresses > ips.txt
|
||||
)
|
||||
}
|
||||
|
||||
infra_stop() {
|
||||
(
|
||||
cd tags/$TAG
|
||||
terraform destroy -auto-approve
|
||||
)
|
||||
}
|
||||
@@ -31,7 +31,13 @@ while ips:
|
||||
clusters.append(cluster)
|
||||
|
||||
template_file_name = SETTINGS["cards_template"]
|
||||
template = jinja2.Template(open(template_file_name).read())
|
||||
template_file_path = os.path.join(
|
||||
os.path.dirname(__file__),
|
||||
"..",
|
||||
"templates",
|
||||
template_file_name
|
||||
)
|
||||
template = jinja2.Template(open(template_file_path).read())
|
||||
with open("ips.html", "w") as f:
|
||||
f.write(template.render(clusters=clusters, **SETTINGS))
|
||||
print("Generated ips.html")
|
||||
|
||||
@@ -13,6 +13,7 @@ COMPOSE_VERSION = config["compose_version"]
|
||||
MACHINE_VERSION = config["machine_version"]
|
||||
CLUSTER_SIZE = config["clustersize"]
|
||||
ENGINE_VERSION = config["engine_version"]
|
||||
DOCKER_USER_PASSWORD = config["docker_user_password"]
|
||||
|
||||
#################################
|
||||
|
||||
@@ -45,7 +46,7 @@ def system(cmd):
|
||||
|
||||
# On EC2, the ephemeral disk might be mounted on /mnt.
|
||||
# If /mnt is a mountpoint, place Docker workspace on it.
|
||||
system("if mountpoint -q /mnt; then sudo mkdir /mnt/docker && sudo ln -s /mnt/docker /var/lib/docker; fi")
|
||||
system("if mountpoint -q /mnt; then sudo mkdir -p /mnt/docker && sudo ln -sfn /mnt/docker /var/lib/docker; fi")
|
||||
|
||||
# Put our public IP in /tmp/ipv4
|
||||
# ipv4_retrieval_endpoint = "http://169.254.169.254/latest/meta-data/public-ipv4"
|
||||
@@ -54,9 +55,9 @@ system("curl --silent {} > /tmp/ipv4".format(ipv4_retrieval_endpoint))
|
||||
|
||||
ipv4 = open("/tmp/ipv4").read()
|
||||
|
||||
# Add a "docker" user with password "training"
|
||||
# Add a "docker" user with password coming from the settings
|
||||
system("id docker || sudo useradd -d /home/docker -m -s /bin/bash docker")
|
||||
system("echo docker:training | sudo chpasswd")
|
||||
system("echo docker:{} | sudo chpasswd".format(DOCKER_USER_PASSWORD))
|
||||
|
||||
# Fancy prompt courtesy of @soulshake.
|
||||
system("""sudo -u docker tee -a /home/docker/.bashrc <<SQRL
|
||||
@@ -82,7 +83,7 @@ system("sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /e
|
||||
|
||||
system("sudo service ssh restart")
|
||||
system("sudo apt-get -q update")
|
||||
system("sudo apt-get -qy install git jq python-pip")
|
||||
system("sudo apt-get -qy install git jq")
|
||||
|
||||
#######################
|
||||
### DOCKER INSTALLS ###
|
||||
@@ -97,7 +98,6 @@ system("sudo apt-get -q update")
|
||||
system("sudo apt-get -qy install docker-ce")
|
||||
|
||||
### Install docker-compose
|
||||
#system("sudo pip install -U docker-compose=={}".format(COMPOSE_VERSION))
|
||||
system("sudo curl -sSL -o /usr/local/bin/docker-compose https://github.com/docker/compose/releases/download/{}/docker-compose-{}-{}".format(COMPOSE_VERSION, platform.system(), platform.machine()))
|
||||
system("sudo chmod +x /usr/local/bin/docker-compose")
|
||||
system("docker-compose version")
|
||||
@@ -108,7 +108,7 @@ system("sudo chmod +x /usr/local/bin/docker-machine")
|
||||
system("docker-machine version")
|
||||
|
||||
system("sudo apt-get remove -y --purge dnsmasq-base")
|
||||
system("sudo apt-get -qy install python-setuptools pssh apache2-utils httping htop unzip mosh")
|
||||
system("sudo apt-get -qy install python-setuptools pssh apache2-utils httping htop unzip mosh tree")
|
||||
|
||||
### Wait for Docker to be up.
|
||||
### (If we don't do this, Docker will not be responsive during the next step.)
|
||||
|
||||
@@ -1,12 +1,17 @@
|
||||
# This file can be sourced in order to directly run commands on
|
||||
# a batch of VMs whose IPs are located in ips.txt of the directory in which
|
||||
# a group of VMs whose IPs are located in ips.txt of the directory in which
|
||||
# the command is run.
|
||||
|
||||
pssh() {
|
||||
HOSTFILE="ips.txt"
|
||||
if [ -z "$TAG" ]; then
|
||||
>/dev/stderr echo "Variable \$TAG is not set."
|
||||
return
|
||||
fi
|
||||
|
||||
HOSTFILE="tags/$TAG/ips.txt"
|
||||
|
||||
[ -f $HOSTFILE ] || {
|
||||
>/dev/stderr echo "No hostfile found at $HOSTFILE"
|
||||
>/dev/stderr echo "Hostfile $HOSTFILE not found."
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
26
prepare-vms/settings/enix.yaml
Normal file
26
prepare-vms/settings/enix.yaml
Normal file
@@ -0,0 +1,26 @@
|
||||
# Number of VMs per cluster
|
||||
clustersize: 1
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: enix.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# Feel free to reduce this if your printer can handle it
|
||||
paper_margin: 0.2in
|
||||
|
||||
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
|
||||
# If you print (or generate a PDF) using ips.html, they will be ignored.
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
@@ -22,3 +22,6 @@ engine_version: test
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.18.0
|
||||
machine_version: 0.13.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
@@ -17,8 +17,11 @@ paper_margin: 0.2in
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: test
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.17.1
|
||||
machine_version: 0.13.0
|
||||
compose_version: 1.22.0
|
||||
machine_version: 0.15.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
26
prepare-vms/settings/jerome.yaml
Normal file
26
prepare-vms/settings/jerome.yaml
Normal file
@@ -0,0 +1,26 @@
|
||||
# Number of VMs per cluster
|
||||
clustersize: 4
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: jerome.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# Feel free to reduce this if your printer can handle it
|
||||
paper_margin: 0.2in
|
||||
|
||||
# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
|
||||
# If you print (or generate a PDF) using ips.html, they will be ignored.
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
clustersize: 3
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: settings/kube101.html
|
||||
cards_template: kube101.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: Letter
|
||||
@@ -17,8 +17,12 @@ paper_margin: 0.2in
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: test
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.18.0
|
||||
machine_version: 0.13.0
|
||||
compose_version: 1.21.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# This file is passed by trainer-cli to scripts/ips-txt-to-html.py
|
||||
|
||||
# Number of VMs per cluster
|
||||
clustersize: 5
|
||||
clustersize: 3
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
@@ -17,8 +17,11 @@ paper_margin: 0.2in
|
||||
# (The equivalent parameters must be set from the browser's print dialog.)
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: test
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.17.1
|
||||
machine_version: 0.13.0
|
||||
compose_version: 1.22.0
|
||||
machine_version: 0.15.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
@@ -85,7 +85,7 @@ img {
|
||||
<tr><td>login:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>password:</td></tr>
|
||||
<tr><td class="logpass">training</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
|
||||
</p>
|
||||
|
Can't render this file because it contains an unexpected character in line 1 and column 42.
|
121
prepare-vms/templates/enix.html
Normal file
121
prepare-vms/templates/enix.html
Normal file
@@ -0,0 +1,121 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://FIXME.container.training" -%}
|
||||
{%- set pagesize = 9 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine virtuelle" -%}
|
||||
{%- set this_or_each = "cette" -%}
|
||||
{%- set plural = "" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "Kubernetes workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "chaque" -%}
|
||||
{%- set plural = "s" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_kube -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
|
||||
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1em;
|
||||
font-size: 15px;
|
||||
font-family: 'Slabo 27px';
|
||||
}
|
||||
|
||||
table {
|
||||
border-spacing: 0;
|
||||
margin-top: 0.4em;
|
||||
margin-bottom: 0.4em;
|
||||
border-left: 0.8em double grey;
|
||||
padding-left: 0.4em;
|
||||
}
|
||||
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
width: 30%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.4em 0;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 4em;
|
||||
float: right;
|
||||
margin-right: -0.3em;
|
||||
}
|
||||
|
||||
img.enix {
|
||||
height: 4.0em;
|
||||
margin-top: 0.4em;
|
||||
}
|
||||
|
||||
img.kube {
|
||||
height: 4.2em;
|
||||
margin-top: 1.7em;
|
||||
}
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.pagebreak {
|
||||
page-break-after: always;
|
||||
clear: both;
|
||||
display: block;
|
||||
height: 8px;
|
||||
}
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
{% if loop.index0>0 and loop.index0%pagesize==0 %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Voici les informations permettant de se connecter à votre
|
||||
{{ cluster_or_machine }} pour cette formation.
|
||||
Vous pouvez vous connecter à {{ this_or_each }} machine virtuelle
|
||||
avec n'importe quel client SSH.
|
||||
</p>
|
||||
<p>
|
||||
<img class="enix" src="https://enix.io/static/img/logos/logo-domain-cropped.png" />
|
||||
<table>
|
||||
<tr><td>identifiant:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>mot de passe:</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
|
||||
</p>
|
||||
<p>
|
||||
Adresse{{ plural }} IP :
|
||||
<!--<img class="kube" src="{{ image_src }}" />-->
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>Le support de formation est à l'adresse suivante :
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
134
prepare-vms/templates/jerome.html
Normal file
134
prepare-vms/templates/jerome.html
Normal file
@@ -0,0 +1,134 @@
|
||||
{# Feel free to customize or override anything in there! #}
|
||||
{%- set url = "http://qconuk2019.container.training/" -%}
|
||||
{%- set pagesize = 9 -%}
|
||||
{%- if clustersize == 1 -%}
|
||||
{%- set workshop_name = "Docker workshop" -%}
|
||||
{%- set cluster_or_machine = "machine" -%}
|
||||
{%- set this_or_each = "this" -%}
|
||||
{%- set machine_is_or_machines_are = "machine is" -%}
|
||||
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
|
||||
{%- else -%}
|
||||
{%- set workshop_name = "Kubernetes workshop" -%}
|
||||
{%- set cluster_or_machine = "cluster" -%}
|
||||
{%- set this_or_each = "each" -%}
|
||||
{%- set machine_is_or_machines_are = "machines are" -%}
|
||||
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
|
||||
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
|
||||
{%- set image_src = image_src_kube -%}
|
||||
{%- endif -%}
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
|
||||
<html>
|
||||
<head><style>
|
||||
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');
|
||||
body, table {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
line-height: 1.0em;
|
||||
font-size: 15px;
|
||||
font-family: 'Slabo 27px';
|
||||
}
|
||||
|
||||
table {
|
||||
border-spacing: 0;
|
||||
margin-top: 0.4em;
|
||||
margin-bottom: 0.4em;
|
||||
border-left: 0.8em double grey;
|
||||
padding-left: 0.4em;
|
||||
}
|
||||
|
||||
div {
|
||||
float: left;
|
||||
border: 1px dotted black;
|
||||
height: 31%;
|
||||
padding-top: 1%;
|
||||
padding-bottom: 1%;
|
||||
/* columns * (width+left+right) < 100% */
|
||||
width: 30%;
|
||||
padding-left: 1.5%;
|
||||
padding-right: 1.5%;
|
||||
}
|
||||
|
||||
div.back {
|
||||
border: 1px dotted white;
|
||||
}
|
||||
|
||||
div.back p {
|
||||
margin: 0.5em 1em 0 1em;
|
||||
}
|
||||
|
||||
p {
|
||||
margin: 0.4em 0 0.8em 0;
|
||||
}
|
||||
|
||||
img {
|
||||
height: 5em;
|
||||
float: right;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
.logpass {
|
||||
font-family: monospace;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.pagebreak {
|
||||
page-break-after: always;
|
||||
clear: both;
|
||||
display: block;
|
||||
height: 8px;
|
||||
}
|
||||
</style></head>
|
||||
<body>
|
||||
{% for cluster in clusters %}
|
||||
<div>
|
||||
|
||||
<p>
|
||||
Here is the connection information to your very own
|
||||
{{ cluster_or_machine }} for this {{ workshop_name }}.
|
||||
You can connect to {{ this_or_each }} VM with any SSH client.
|
||||
</p>
|
||||
<p>
|
||||
<img src="{{ image_src }}" />
|
||||
<table>
|
||||
<tr><td>login:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>password:</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
|
||||
</p>
|
||||
<p>
|
||||
Your {{ machine_is_or_machines_are }}:
|
||||
<table>
|
||||
{% for node in cluster %}
|
||||
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
|
||||
{% endfor %}
|
||||
</table>
|
||||
</p>
|
||||
<p>You can find the slides at:
|
||||
<center>{{ url }}</center>
|
||||
</p>
|
||||
</div>
|
||||
{% if loop.index%pagesize==0 or loop.last %}
|
||||
<span class="pagebreak"></span>
|
||||
{% for x in range(pagesize) %}
|
||||
<div class="back">
|
||||
<br/>
|
||||
<p>You got this at the workshop
|
||||
"Getting Started With Kubernetes and Container Orchestration"
|
||||
during QCON London (March 2019).</p>
|
||||
<p>If you liked that workshop,
|
||||
I can train your team or organization
|
||||
on Docker, container, and Kubernetes,
|
||||
with curriculums of 1 to 5 days.
|
||||
</p>
|
||||
<p>Interested? Contact me at:</p>
|
||||
<p>jerome.petazzoni@gmail.com</p>
|
||||
<p>Thank you!</p>
|
||||
</div>
|
||||
{% endfor %}
|
||||
<span class="pagebreak"></span>
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
</body>
|
||||
</html>
|
||||
@@ -85,7 +85,7 @@ img {
|
||||
<tr><td>login:</td></tr>
|
||||
<tr><td class="logpass">docker</td></tr>
|
||||
<tr><td>password:</td></tr>
|
||||
<tr><td class="logpass">training</td></tr>
|
||||
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
|
||||
</table>
|
||||
|
||||
</p>
|
||||
5
prepare-vms/terraform/keypair.tf
Normal file
5
prepare-vms/terraform/keypair.tf
Normal file
@@ -0,0 +1,5 @@
|
||||
resource "openstack_compute_keypair_v2" "ssh_deploy_key" {
|
||||
name = "${var.prefix}"
|
||||
public_key = "${file("~/.ssh/id_rsa.pub")}"
|
||||
}
|
||||
|
||||
32
prepare-vms/terraform/machines.tf
Normal file
32
prepare-vms/terraform/machines.tf
Normal file
@@ -0,0 +1,32 @@
|
||||
resource "openstack_compute_instance_v2" "machine" {
|
||||
count = "${var.count}"
|
||||
name = "${format("%s-%04d", "${var.prefix}", count.index+1)}"
|
||||
image_name = "Ubuntu 16.04.5 (Xenial Xerus)"
|
||||
flavor_name = "${var.flavor}"
|
||||
security_groups = ["${openstack_networking_secgroup_v2.full_access.name}"]
|
||||
key_pair = "${openstack_compute_keypair_v2.ssh_deploy_key.name}"
|
||||
|
||||
network {
|
||||
name = "${openstack_networking_network_v2.internal.name}"
|
||||
fixed_ip_v4 = "${cidrhost("${openstack_networking_subnet_v2.internal.cidr}", count.index+10)}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_v2" "machine" {
|
||||
count = "${var.count}"
|
||||
# This is something provided to us by Enix when our tenant was provisioned.
|
||||
pool = "Public Floating"
|
||||
}
|
||||
|
||||
resource "openstack_compute_floatingip_associate_v2" "machine" {
|
||||
count = "${var.count}"
|
||||
floating_ip = "${openstack_compute_floatingip_v2.machine.*.address[count.index]}"
|
||||
instance_id = "${openstack_compute_instance_v2.machine.*.id[count.index]}"
|
||||
fixed_ip = "${cidrhost("${openstack_networking_subnet_v2.internal.cidr}", count.index+10)}"
|
||||
}
|
||||
|
||||
output "ip_addresses" {
|
||||
value = "${join("\n", openstack_compute_floatingip_v2.machine.*.address)}"
|
||||
}
|
||||
|
||||
variable "flavor" {}
|
||||
23
prepare-vms/terraform/network.tf
Normal file
23
prepare-vms/terraform/network.tf
Normal file
@@ -0,0 +1,23 @@
|
||||
resource "openstack_networking_network_v2" "internal" {
|
||||
name = "${var.prefix}"
|
||||
}
|
||||
|
||||
resource "openstack_networking_subnet_v2" "internal" {
|
||||
name = "${var.prefix}"
|
||||
network_id = "${openstack_networking_network_v2.internal.id}"
|
||||
cidr = "10.10.0.0/16"
|
||||
ip_version = 4
|
||||
dns_nameservers = ["1.1.1.1"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_router_v2" "router" {
|
||||
name = "${var.prefix}"
|
||||
external_network_id = "15f0c299-1f50-42a6-9aff-63ea5b75f3fc"
|
||||
}
|
||||
|
||||
resource "openstack_networking_router_interface_v2" "router_internal" {
|
||||
router_id = "${openstack_networking_router_v2.router.id}"
|
||||
subnet_id = "${openstack_networking_subnet_v2.internal.id}"
|
||||
}
|
||||
|
||||
|
||||
13
prepare-vms/terraform/provider.tf
Normal file
13
prepare-vms/terraform/provider.tf
Normal file
@@ -0,0 +1,13 @@
|
||||
provider "openstack" {
|
||||
user_name = "${var.user}"
|
||||
tenant_name = "${var.tenant}"
|
||||
domain_name = "${var.domain}"
|
||||
password = "${var.password}"
|
||||
auth_url = "${var.auth_url}"
|
||||
}
|
||||
|
||||
variable "user" {}
|
||||
variable "tenant" {}
|
||||
variable "domain" {}
|
||||
variable "password" {}
|
||||
variable "auth_url" {}
|
||||
12
prepare-vms/terraform/secgroup.tf
Normal file
12
prepare-vms/terraform/secgroup.tf
Normal file
@@ -0,0 +1,12 @@
|
||||
resource "openstack_networking_secgroup_v2" "full_access" {
|
||||
name = "${var.prefix} - full access"
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "full_access" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = ""
|
||||
remote_ip_prefix = "0.0.0.0/0"
|
||||
security_group_id = "${openstack_networking_secgroup_v2.full_access.id}"
|
||||
}
|
||||
|
||||
8
prepare-vms/terraform/vars.tf
Normal file
8
prepare-vms/terraform/vars.tf
Normal file
@@ -0,0 +1,8 @@
|
||||
variable "prefix" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
variable "count" {
|
||||
type = "string"
|
||||
}
|
||||
|
||||
@@ -1,20 +1,19 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Get the script's real directory, whether we're being called directly or via a symlink
|
||||
# Get the script's real directory.
|
||||
# This should work whether we're being called directly or via a symlink.
|
||||
if [ -L "$0" ]; then
|
||||
export SCRIPT_DIR=$(dirname $(readlink "$0"))
|
||||
else
|
||||
export SCRIPT_DIR=$(dirname "$0")
|
||||
fi
|
||||
|
||||
# Load all scriptlets
|
||||
# Load all scriptlets.
|
||||
cd "$SCRIPT_DIR"
|
||||
for lib in lib/*.sh; do
|
||||
. $lib
|
||||
done
|
||||
|
||||
TRAINER_IMAGE="preparevms_prepare-vms"
|
||||
|
||||
DEPENDENCIES="
|
||||
aws
|
||||
ssh
|
||||
@@ -25,49 +24,26 @@ DEPENDENCIES="
|
||||
man
|
||||
"
|
||||
|
||||
ENVVARS="
|
||||
AWS_ACCESS_KEY_ID
|
||||
AWS_SECRET_ACCESS_KEY
|
||||
AWS_DEFAULT_REGION
|
||||
SSH_AUTH_SOCK
|
||||
"
|
||||
# Check for missing dependencies, and issue a warning if necessary.
|
||||
missing=0
|
||||
for dependency in $DEPENDENCIES; do
|
||||
if ! command -v $dependency >/dev/null; then
|
||||
warning "Dependency $dependency could not be found."
|
||||
missing=1
|
||||
fi
|
||||
done
|
||||
if [ $missing = 1 ]; then
|
||||
warning "At least one dependency is missing. Install it or try the image wrapper."
|
||||
fi
|
||||
|
||||
check_envvars() {
|
||||
status=0
|
||||
for envvar in $ENVVARS; do
|
||||
if [ -z "${!envvar}" ]; then
|
||||
error "Environment variable $envvar is not set."
|
||||
if [ "$envvar" = "SSH_AUTH_SOCK" ]; then
|
||||
error "Hint: run 'eval \$(ssh-agent) ; ssh-add' and try again?"
|
||||
fi
|
||||
status=1
|
||||
fi
|
||||
done
|
||||
return $status
|
||||
}
|
||||
# Check if SSH_AUTH_SOCK is set.
|
||||
# (If it's not, deployment will almost certainly fail.)
|
||||
if [ -z "${SSH_AUTH_SOCK}" ]; then
|
||||
warning "Environment variable SSH_AUTH_SOCK is not set."
|
||||
warning "Hint: run 'eval \$(ssh-agent) ; ssh-add' and try again?"
|
||||
fi
|
||||
|
||||
check_dependencies() {
|
||||
status=0
|
||||
for dependency in $DEPENDENCIES; do
|
||||
if ! command -v $dependency >/dev/null; then
|
||||
warning "Dependency $dependency could not be found."
|
||||
status=1
|
||||
fi
|
||||
done
|
||||
return $status
|
||||
}
|
||||
|
||||
check_image() {
|
||||
docker inspect $TRAINER_IMAGE >/dev/null 2>&1
|
||||
}
|
||||
|
||||
check_envvars \
|
||||
|| die "Please set all required environment variables."
|
||||
|
||||
check_dependencies \
|
||||
|| warning "At least one dependency is missing. Install it or try the image wrapper."
|
||||
|
||||
# Now check which command was invoked and execute it
|
||||
# Now check which command was invoked and execute it.
|
||||
if [ "$1" ]; then
|
||||
cmd="$1"
|
||||
shift
|
||||
@@ -77,6 +53,3 @@ fi
|
||||
fun=_cmd_$cmd
|
||||
type -t $fun | grep -q function || die "Invalid command: $cmd"
|
||||
$fun "$@"
|
||||
|
||||
# export SSH_AUTH_DIRNAME=$(dirname $SSH_AUTH_SOCK)
|
||||
# docker-compose run prepare-vms "$@"
|
||||
|
||||
4
slides/Dockerfile
Normal file
4
slides/Dockerfile
Normal file
@@ -0,0 +1,4 @@
|
||||
FROM alpine:3.11
|
||||
RUN apk add --no-cache entr py3-pip git zip
|
||||
COPY requirements.txt .
|
||||
RUN pip3 install -r requirements.txt
|
||||
@@ -34,6 +34,14 @@ compile each `foo.yml` file into `foo.yml.html`.
|
||||
You can also run `./build.sh forever`: it will monitor the current
|
||||
directory and rebuild slides automatically when files are modified.
|
||||
|
||||
If you have problems running `./build.sh` (because of
|
||||
Python dependencies or whatever),
|
||||
you can also run `docker-compose up` in this directory.
|
||||
It will start the `./build.sh forever` script in a container.
|
||||
It will also start a web server exposing the slides
|
||||
(but the slides should also work if you load them from your
|
||||
local filesystem).
|
||||
|
||||
|
||||
## Publishing pipeline
|
||||
|
||||
@@ -53,4 +61,4 @@ You can run `./slidechecker foo.yml.html` to check for
|
||||
missing images and show the number of slides in that deck.
|
||||
It requires `phantomjs` to be installed. It takes some
|
||||
time to run so it is not yet integrated with the publishing
|
||||
pipeline.
|
||||
pipeline.
|
||||
|
||||
1
slides/_redirects
Normal file
1
slides/_redirects
Normal file
@@ -0,0 +1 @@
|
||||
/ /kube-fullday.yml.html 200!
|
||||
@@ -19,6 +19,9 @@ logging.basicConfig(level=os.environ.get("LOG_LEVEL", "INFO"))
|
||||
|
||||
TIMEOUT = 60 # 1 minute
|
||||
|
||||
# This one is not a constant. It's an ugly global.
|
||||
IPADDR = None
|
||||
|
||||
|
||||
class State(object):
|
||||
|
||||
@@ -26,6 +29,10 @@ class State(object):
|
||||
self.interactive = True
|
||||
self.verify_status = False
|
||||
self.simulate_type = True
|
||||
self.switch_desktop = False
|
||||
self.sync_slides = False
|
||||
self.open_links = False
|
||||
self.run_hidden = True
|
||||
self.slide = 1
|
||||
self.snippet = 0
|
||||
|
||||
@@ -34,6 +41,10 @@ class State(object):
|
||||
self.interactive = bool(data["interactive"])
|
||||
self.verify_status = bool(data["verify_status"])
|
||||
self.simulate_type = bool(data["simulate_type"])
|
||||
self.switch_desktop = bool(data["switch_desktop"])
|
||||
self.sync_slides = bool(data["sync_slides"])
|
||||
self.open_links = bool(data["open_links"])
|
||||
self.run_hidden = bool(data["run_hidden"])
|
||||
self.slide = int(data["slide"])
|
||||
self.snippet = int(data["snippet"])
|
||||
|
||||
@@ -43,6 +54,10 @@ class State(object):
|
||||
interactive=self.interactive,
|
||||
verify_status=self.verify_status,
|
||||
simulate_type=self.simulate_type,
|
||||
switch_desktop=self.switch_desktop,
|
||||
sync_slides=self.sync_slides,
|
||||
open_links=self.open_links,
|
||||
run_hidden=self.run_hidden,
|
||||
slide=self.slide,
|
||||
snippet=self.snippet,
|
||||
), f, default_flow_style=False)
|
||||
@@ -119,14 +134,20 @@ class Slide(object):
|
||||
|
||||
|
||||
def focus_slides():
|
||||
if not state.switch_desktop:
|
||||
return
|
||||
subprocess.check_output(["i3-msg", "workspace", "3"])
|
||||
subprocess.check_output(["i3-msg", "workspace", "1"])
|
||||
|
||||
def focus_terminal():
|
||||
if not state.switch_desktop:
|
||||
return
|
||||
subprocess.check_output(["i3-msg", "workspace", "2"])
|
||||
subprocess.check_output(["i3-msg", "workspace", "1"])
|
||||
|
||||
def focus_browser():
|
||||
if not state.switch_desktop:
|
||||
return
|
||||
subprocess.check_output(["i3-msg", "workspace", "4"])
|
||||
subprocess.check_output(["i3-msg", "workspace", "1"])
|
||||
|
||||
@@ -163,6 +184,9 @@ def wait_for_prompt():
|
||||
last_line = output.split('\n')[-1]
|
||||
# Our custom prompt on the VMs has two lines; the 2nd line is just '$'
|
||||
if last_line == "$":
|
||||
# This is a perfect opportunity to grab the node's IP address
|
||||
global IPADDR
|
||||
IPADDR = re.findall("^\[(.*)\]", output, re.MULTILINE)[-1]
|
||||
return
|
||||
# When we are in an alpine container, the prompt will be "/ #"
|
||||
if last_line == "/ #":
|
||||
@@ -199,7 +223,7 @@ def check_exit_status():
|
||||
def setup_tmux_and_ssh():
|
||||
if subprocess.call(["tmux", "has-session"]):
|
||||
logging.error("Couldn't connect to tmux. Please setup tmux first.")
|
||||
ipaddr = open("../../prepare-vms/ips.txt").read().split("\n")[0]
|
||||
ipaddr = "$IPADDR"
|
||||
uid = os.getuid()
|
||||
|
||||
raise Exception("""
|
||||
@@ -301,17 +325,21 @@ while True:
|
||||
slide = slides[state.slide]
|
||||
snippet = slide.snippets[state.snippet-1] if state.snippet else None
|
||||
click.clear()
|
||||
print("[Slide {}/{}] [Snippet {}/{}] [simulate_type:{}] [verify_status:{}]"
|
||||
print("[Slide {}/{}] [Snippet {}/{}] [simulate_type:{}] [verify_status:{}] "
|
||||
"[switch_desktop:{}] [sync_slides:{}] [open_links:{}] [run_hidden:{}]"
|
||||
.format(state.slide, len(slides)-1,
|
||||
state.snippet, len(slide.snippets) if slide.snippets else 0,
|
||||
state.simulate_type, state.verify_status))
|
||||
state.simulate_type, state.verify_status,
|
||||
state.switch_desktop, state.sync_slides,
|
||||
state.open_links, state.run_hidden))
|
||||
print(hrule())
|
||||
if snippet:
|
||||
print(slide.content.replace(snippet.content, ansi(7)(snippet.content)))
|
||||
focus_terminal()
|
||||
else:
|
||||
print(slide.content)
|
||||
subprocess.check_output(["./gotoslide.js", str(slide.number)])
|
||||
if state.sync_slides:
|
||||
subprocess.check_output(["./gotoslide.js", str(slide.number)])
|
||||
focus_slides()
|
||||
print(hrule())
|
||||
if state.interactive:
|
||||
@@ -320,6 +348,10 @@ while True:
|
||||
print("n/→ Next")
|
||||
print("s Simulate keystrokes")
|
||||
print("v Validate exit status")
|
||||
print("d Switch desktop")
|
||||
print("k Sync slides")
|
||||
print("o Open links")
|
||||
print("h Run hidden commands")
|
||||
print("g Go to a specific slide")
|
||||
print("q Quit")
|
||||
print("c Continue non-interactively until next error")
|
||||
@@ -335,6 +367,14 @@ while True:
|
||||
state.simulate_type = not state.simulate_type
|
||||
elif command == "v":
|
||||
state.verify_status = not state.verify_status
|
||||
elif command == "d":
|
||||
state.switch_desktop = not state.switch_desktop
|
||||
elif command == "k":
|
||||
state.sync_slides = not state.sync_slides
|
||||
elif command == "o":
|
||||
state.open_links = not state.open_links
|
||||
elif command == "h":
|
||||
state.run_hidden = not state.run_hidden
|
||||
elif command == "g":
|
||||
state.slide = click.prompt("Enter slide number", type=int)
|
||||
state.snippet = 0
|
||||
@@ -360,7 +400,7 @@ while True:
|
||||
logging.info("Running with method {}: {}".format(method, data))
|
||||
if method == "keys":
|
||||
send_keys(data)
|
||||
elif method == "bash":
|
||||
elif method == "bash" or (method == "hide" and state.run_hidden):
|
||||
# Make sure that we're ready
|
||||
wait_for_prompt()
|
||||
# Strip leading spaces
|
||||
@@ -397,14 +437,14 @@ while True:
|
||||
elif method == "open":
|
||||
# Cheap way to get node1's IP address
|
||||
screen = capture_pane()
|
||||
ipaddr = re.findall("^\[(.*)\]", screen, re.MULTILINE)[-1]
|
||||
url = data.replace("/node1", "/{}".format(ipaddr))
|
||||
url = data.replace("/node1", "/{}".format(IPADDR))
|
||||
# This should probably be adapted to run on different OS
|
||||
subprocess.check_output(["xdg-open", url])
|
||||
focus_browser()
|
||||
if state.interactive:
|
||||
print("Press any key to continue to next step...")
|
||||
click.getchar()
|
||||
if state.open_links:
|
||||
subprocess.check_output(["xdg-open", url])
|
||||
focus_browser()
|
||||
if state.interactive:
|
||||
print("Press any key to continue to next step...")
|
||||
click.getchar()
|
||||
else:
|
||||
logging.warning("Unknown method {}: {!r}".format(method, data))
|
||||
move_forward()
|
||||
|
||||
1
slides/autopilot/requirements.txt
Normal file
1
slides/autopilot/requirements.txt
Normal file
@@ -0,0 +1 @@
|
||||
click
|
||||
@@ -1,6 +1,8 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
case "$1" in
|
||||
once)
|
||||
./index.py
|
||||
for YAML in *.yml; do
|
||||
./markmaker.py $YAML > $YAML.html || {
|
||||
rm $YAML.html
|
||||
@@ -12,9 +14,17 @@ once)
|
||||
./appendcheck.py $YAML.html
|
||||
done
|
||||
fi
|
||||
zip -qr slides.zip . && echo "Created slides.zip archive."
|
||||
;;
|
||||
|
||||
forever)
|
||||
set +e
|
||||
# check if entr is installed
|
||||
if ! command -v entr >/dev/null; then
|
||||
echo >&2 "First install 'entr' with apt, brew, etc."
|
||||
exit
|
||||
fi
|
||||
|
||||
# There is a weird bug in entr, at least on MacOS,
|
||||
# where it doesn't restore the terminal to a clean
|
||||
# state when exitting. So let's try to work around
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
## About these slides
|
||||
|
||||
- All the content is available in a public GitHub repository:
|
||||
|
||||
https://github.com/jpetazzo/container.training
|
||||
|
||||
- You can get updated "builds" of the slides there:
|
||||
|
||||
http://container.training/
|
||||
|
||||
<!--
|
||||
.exercise[
|
||||
```open https://github.com/jpetazzo/container.training```
|
||||
```open http://container.training/```
|
||||
]
|
||||
-->
|
||||
|
||||
--
|
||||
|
||||
- Typos? Mistakes? Questions? Feel free to hover over the bottom of the slide ...
|
||||
|
||||
.footnote[.emoji[👇] Try it! The source file will be shown and you can view it on GitHub and fork and edit it.]
|
||||
|
||||
<!--
|
||||
.exercise[
|
||||
```open https://github.com/jpetazzo/container.training/tree/master/slides/common/about-slides.md```
|
||||
]
|
||||
-->
|
||||
@@ -1,11 +0,0 @@
|
||||
class: title, self-paced
|
||||
|
||||
Thank you!
|
||||
|
||||
---
|
||||
|
||||
class: title, in-person
|
||||
|
||||
That's all, folks! <br/> Questions?
|
||||
|
||||

|
||||
@@ -1,3 +1,6 @@
|
||||
|
||||
class: title
|
||||
|
||||
# Advanced Dockerfiles
|
||||
|
||||

|
||||
@@ -34,18 +37,6 @@ In this section, we will see more Dockerfile commands.
|
||||
|
||||
---
|
||||
|
||||
## The `MAINTAINER` instruction
|
||||
|
||||
The `MAINTAINER` instruction tells you who wrote the `Dockerfile`.
|
||||
|
||||
```dockerfile
|
||||
MAINTAINER Docker Education Team <education@docker.com>
|
||||
```
|
||||
|
||||
It's optional but recommended.
|
||||
|
||||
---
|
||||
|
||||
## The `RUN` instruction
|
||||
|
||||
The `RUN` instruction can be specified in two ways.
|
||||
@@ -94,8 +85,6 @@ RUN apt-get update && apt-get install -y wget && apt-get clean
|
||||
|
||||
It is also possible to break a command onto multiple lines:
|
||||
|
||||
It is possible to execute multiple commands in a single step:
|
||||
|
||||
```dockerfile
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y wget \
|
||||
@@ -369,7 +358,7 @@ class: extra-details
|
||||
|
||||
## Overriding the `ENTRYPOINT` instruction
|
||||
|
||||
The entry point can be overriden as well.
|
||||
The entry point can be overridden as well.
|
||||
|
||||
```bash
|
||||
$ docker run -it training/ls
|
||||
@@ -430,5 +419,4 @@ ONBUILD COPY . /src
|
||||
```
|
||||
|
||||
* You can't chain `ONBUILD` instructions with `ONBUILD`.
|
||||
* `ONBUILD` can't be used to trigger `FROM` and `MAINTAINER`
|
||||
instructions.
|
||||
* `ONBUILD` can't be used to trigger `FROM` instructions.
|
||||
@@ -40,6 +40,8 @@ ambassador containers.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
@@ -154,6 +156,36 @@ Different deployments will use different underlying technologies.
|
||||
|
||||
---
|
||||
|
||||
## Service meshes
|
||||
|
||||
* A service mesh is a configurable network layer.
|
||||
|
||||
* It can provide service discovery, high availability, load balancing, observability...
|
||||
|
||||
* Service meshes are particularly useful for microservices applications.
|
||||
|
||||
* Service meshes are often implemented as proxies.
|
||||
|
||||
* Applications connect to the service mesh, which relays the connection where needed.
|
||||
|
||||
*Does that sound familiar?*
|
||||
|
||||
---
|
||||
|
||||
## Ambassadors and service meshes
|
||||
|
||||
* When using a service mesh, a "sidecar container" is often used as a proxy
|
||||
|
||||
* Our services connect (transparently) to that sidecar container
|
||||
|
||||
* That sidecar container figures out where to forward the traffic
|
||||
|
||||
... Does that sound familiar?
|
||||
|
||||
(It should, because service meshes are essentially app-wide or cluster-wide ambassadors!)
|
||||
|
||||
---
|
||||
|
||||
## Section summary
|
||||
|
||||
We've learned how to:
|
||||
@@ -166,3 +198,10 @@ For more information about the ambassador pattern, including demos on Swarm and
|
||||
|
||||
* [SwarmWeek video about Swarm+Compose](https://youtube.com/watch?v=qbIvUvwa6As)
|
||||
|
||||
Some services meshes and related projects:
|
||||
|
||||
* [Istio](https://istio.io/)
|
||||
|
||||
* [Linkerd](https://linkerd.io/)
|
||||
|
||||
* [Gloo](https://gloo.solo.io/)
|
||||
201
slides/containers/Application_Configuration.md
Normal file
201
slides/containers/Application_Configuration.md
Normal file
@@ -0,0 +1,201 @@
|
||||
# Application Configuration
|
||||
|
||||
There are many ways to provide configuration to containerized applications.
|
||||
|
||||
There is no "best way" — it depends on factors like:
|
||||
|
||||
* configuration size,
|
||||
|
||||
* mandatory and optional parameters,
|
||||
|
||||
* scope of configuration (per container, per app, per customer, per site, etc),
|
||||
|
||||
* frequency of changes in the configuration.
|
||||
|
||||
---
|
||||
|
||||
## Command-line parameters
|
||||
|
||||
```bash
|
||||
docker run jpetazzo/hamba 80 www1:80 www2:80
|
||||
```
|
||||
|
||||
* Configuration is provided through command-line parameters.
|
||||
|
||||
* In the above example, the `ENTRYPOINT` is a script that will:
|
||||
|
||||
- parse the parameters,
|
||||
|
||||
- generate a configuration file,
|
||||
|
||||
- start the actual service.
|
||||
|
||||
---
|
||||
|
||||
## Command-line parameters pros and cons
|
||||
|
||||
* Appropriate for mandatory parameters (without which the service cannot start).
|
||||
|
||||
* Convenient for "toolbelt" services instantiated many times.
|
||||
|
||||
(Because there is no extra step: just run it!)
|
||||
|
||||
* Not great for dynamic configurations or bigger configurations.
|
||||
|
||||
(These things are still possible, but more cumbersome.)
|
||||
|
||||
---
|
||||
|
||||
## Environment variables
|
||||
|
||||
```bash
|
||||
docker run -e ELASTICSEARCH_URL=http://es42:9201/ kibana
|
||||
```
|
||||
|
||||
* Configuration is provided through environment variables.
|
||||
|
||||
* The environment variable can be used straight by the program,
|
||||
<br/>or by a script generating a configuration file.
|
||||
|
||||
---
|
||||
|
||||
## Environment variables pros and cons
|
||||
|
||||
* Appropriate for optional parameters (since the image can provide default values).
|
||||
|
||||
* Also convenient for services instantiated many times.
|
||||
|
||||
(It's as easy as command-line parameters.)
|
||||
|
||||
* Great for services with lots of parameters, but you only want to specify a few.
|
||||
|
||||
(And use default values for everything else.)
|
||||
|
||||
* Ability to introspect possible parameters and their default values.
|
||||
|
||||
* Not great for dynamic configurations.
|
||||
|
||||
---
|
||||
|
||||
## Baked-in configuration
|
||||
|
||||
```
|
||||
FROM prometheus
|
||||
COPY prometheus.conf /etc
|
||||
```
|
||||
|
||||
* The configuration is added to the image.
|
||||
|
||||
* The image may have a default configuration; the new configuration can:
|
||||
|
||||
- replace the default configuration,
|
||||
|
||||
- extend it (if the code can read multiple configuration files).
|
||||
|
||||
---
|
||||
|
||||
## Baked-in configuration pros and cons
|
||||
|
||||
* Allows arbitrary customization and complex configuration files.
|
||||
|
||||
* Requires to write a configuration file. (Obviously!)
|
||||
|
||||
* Requires to build an image to start the service.
|
||||
|
||||
* Requires to rebuild the image to reconfigure the service.
|
||||
|
||||
* Requires to rebuild the image to upgrade the service.
|
||||
|
||||
* Configured images can be stored in registries.
|
||||
|
||||
(Which is great, but requires a registry.)
|
||||
|
||||
---
|
||||
|
||||
## Configuration volume
|
||||
|
||||
```bash
|
||||
docker run -v appconfig:/etc/appconfig myapp
|
||||
```
|
||||
|
||||
* The configuration is stored in a volume.
|
||||
|
||||
* The volume is attached to the container.
|
||||
|
||||
* The image may have a default configuration.
|
||||
|
||||
(But this results in a less "obvious" setup, that needs more documentation.)
|
||||
|
||||
---
|
||||
|
||||
## Configuration volume pros and cons
|
||||
|
||||
* Allows arbitrary customization and complex configuration files.
|
||||
|
||||
* Requires to create a volume for each different configuration.
|
||||
|
||||
* Services with identical configurations can use the same volume.
|
||||
|
||||
* Doesn't require to build / rebuild an image when upgrading / reconfiguring.
|
||||
|
||||
* Configuration can be generated or edited through another container.
|
||||
|
||||
---
|
||||
|
||||
## Dynamic configuration volume
|
||||
|
||||
* This is a powerful pattern for dynamic, complex configurations.
|
||||
|
||||
* The configuration is stored in a volume.
|
||||
|
||||
* The configuration is generated / updated by a special container.
|
||||
|
||||
* The application container detects when the configuration is changed.
|
||||
|
||||
(And automatically reloads the configuration when necessary.)
|
||||
|
||||
* The configuration can be shared between multiple services if needed.
|
||||
|
||||
---
|
||||
|
||||
## Dynamic configuration volume example
|
||||
|
||||
In a first terminal, start a load balancer with an initial configuration:
|
||||
|
||||
```bash
|
||||
$ docker run --name loadbalancer jpetazzo/hamba \
|
||||
80 goo.gl:80
|
||||
```
|
||||
|
||||
In another terminal, reconfigure that load balancer:
|
||||
|
||||
```bash
|
||||
$ docker run --rm --volumes-from loadbalancer jpetazzo/hamba reconfigure \
|
||||
80 google.com:80
|
||||
```
|
||||
|
||||
The configuration could also be updated through e.g. a REST API.
|
||||
|
||||
(The REST API being itself served from another container.)
|
||||
|
||||
---
|
||||
|
||||
## Keeping secrets
|
||||
|
||||
.warning[Ideally, you should not put secrets (passwords, tokens...) in:]
|
||||
|
||||
* command-line or environment variables (anyone with Docker API access can get them),
|
||||
|
||||
* images, especially stored in a registry.
|
||||
|
||||
Secrets management is better handled with an orchestrator (like Swarm or Kubernetes).
|
||||
|
||||
Orchestrators will allow to pass secrets in a "one-way" manner.
|
||||
|
||||
Managing secrets securely without an orchestrator can be contrived.
|
||||
|
||||
E.g.:
|
||||
|
||||
- read the secret on stdin when the service starts,
|
||||
|
||||
- pass the secret using an API endpoint.
|
||||
@@ -117,7 +117,7 @@ CONTAINER ID IMAGE ... CREATED STATUS ...
|
||||
|
||||
Many Docker commands will work on container IDs: `docker stop`, `docker rm`...
|
||||
|
||||
If we want to list only the IDs of our containers (without the other colums
|
||||
If we want to list only the IDs of our containers (without the other columns
|
||||
or the header line),
|
||||
we can use the `-q` ("Quiet", "Quick") flag:
|
||||
|
||||
@@ -144,6 +144,10 @@ At a first glance, it looks like this would be particularly useful in scripts.
|
||||
However, if we want to start a container and get its ID in a reliable way,
|
||||
it is better to use `docker run -d`, which we will cover in a bit.
|
||||
|
||||
(Using `docker ps -lq` is prone to race conditions: what happens if someone
|
||||
else, or another program or script, starts another container just before
|
||||
we run `docker ps -lq`?)
|
||||
|
||||
---
|
||||
|
||||
## View the logs of a container
|
||||
@@ -93,20 +93,22 @@ The output of `docker build` looks like this:
|
||||
|
||||
.small[
|
||||
```bash
|
||||
$ docker build -t figlet .
|
||||
Sending build context to Docker daemon 2.048 kB
|
||||
Sending build context to Docker daemon
|
||||
Step 0 : FROM ubuntu
|
||||
---> e54ca5efa2e9
|
||||
Step 1 : RUN apt-get update
|
||||
---> Running in 840cb3533193
|
||||
---> 7257c37726a1
|
||||
Removing intermediate container 840cb3533193
|
||||
Step 2 : RUN apt-get install figlet
|
||||
---> Running in 2b44df762a2f
|
||||
---> f9e8f1642759
|
||||
Removing intermediate container 2b44df762a2f
|
||||
Successfully built f9e8f1642759
|
||||
docker build -t figlet .
|
||||
Sending build context to Docker daemon 2.048kB
|
||||
Step 1/3 : FROM ubuntu
|
||||
---> f975c5035748
|
||||
Step 2/3 : RUN apt-get update
|
||||
---> Running in e01b294dbffd
|
||||
(...output of the RUN command...)
|
||||
Removing intermediate container e01b294dbffd
|
||||
---> eb8d9b561b37
|
||||
Step 3/3 : RUN apt-get install figlet
|
||||
---> Running in c29230d70f9b
|
||||
(...output of the RUN command...)
|
||||
Removing intermediate container c29230d70f9b
|
||||
---> 0dfd7a253f21
|
||||
Successfully built 0dfd7a253f21
|
||||
Successfully tagged figlet:latest
|
||||
```
|
||||
]
|
||||
|
||||
@@ -129,25 +131,31 @@ Sending build context to Docker daemon 2.048 kB
|
||||
|
||||
* Be careful (or patient) if that directory is big and your link is slow.
|
||||
|
||||
* You can speed up the process with a [`.dockerignore`](https://docs.docker.com/engine/reference/builder/#dockerignore-file) file
|
||||
|
||||
* It tells docker to ignore specific files in the directory
|
||||
|
||||
* Only ignore files that you won't need in the build context!
|
||||
|
||||
---
|
||||
|
||||
## Executing each step
|
||||
|
||||
```bash
|
||||
Step 1 : RUN apt-get update
|
||||
---> Running in 840cb3533193
|
||||
Step 2/3 : RUN apt-get update
|
||||
---> Running in e01b294dbffd
|
||||
(...output of the RUN command...)
|
||||
---> 7257c37726a1
|
||||
Removing intermediate container 840cb3533193
|
||||
Removing intermediate container e01b294dbffd
|
||||
---> eb8d9b561b37
|
||||
```
|
||||
|
||||
* A container (`840cb3533193`) is created from the base image.
|
||||
* A container (`e01b294dbffd`) is created from the base image.
|
||||
|
||||
* The `RUN` command is executed in this container.
|
||||
|
||||
* The container is committed into an image (`7257c37726a1`).
|
||||
* The container is committed into an image (`eb8d9b561b37`).
|
||||
|
||||
* The build container (`840cb3533193`) is removed.
|
||||
* The build container (`e01b294dbffd`) is removed.
|
||||
|
||||
* The output of this step will be the base image for the next one.
|
||||
|
||||
@@ -64,6 +64,7 @@ Let's build it:
|
||||
$ docker build -t figlet .
|
||||
...
|
||||
Successfully built 042dff3b4a8d
|
||||
Successfully tagged figlet:latest
|
||||
```
|
||||
|
||||
And run it:
|
||||
@@ -165,6 +166,7 @@ Let's build it:
|
||||
$ docker build -t figlet .
|
||||
...
|
||||
Successfully built 36f588918d73
|
||||
Successfully tagged figlet:latest
|
||||
```
|
||||
|
||||
And run it:
|
||||
@@ -223,6 +225,7 @@ Let's build it:
|
||||
$ docker build -t figlet .
|
||||
...
|
||||
Successfully built 6e0b6a048a07
|
||||
Successfully tagged figlet:latest
|
||||
```
|
||||
|
||||
Run it without parameters:
|
||||
@@ -49,7 +49,7 @@ Before diving in, let's see a small example of Compose in action.
|
||||
|
||||
---
|
||||
|
||||
## Compose in action
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
@@ -60,6 +60,10 @@ Before diving in, let's see a small example of Compose in action.
|
||||
If you are using the official training virtual machines, Compose has been
|
||||
pre-installed.
|
||||
|
||||
If you are using Docker for Mac/Windows or the Docker Toolbox, Compose comes with them.
|
||||
|
||||
If you are on Linux (desktop or server environment), you will need to install Compose from its [release page](https://github.com/docker/compose/releases) or with `pip install docker-compose`.
|
||||
|
||||
You can always check that it is installed by running:
|
||||
|
||||
```bash
|
||||
@@ -74,7 +78,7 @@ First step: clone the source code for the app we will be working on.
|
||||
|
||||
```bash
|
||||
$ cd
|
||||
$ git clone git://github.com/jpetazzo/trainingwheels
|
||||
$ git clone https://github.com/jpetazzo/trainingwheels
|
||||
...
|
||||
$ cd trainingwheels
|
||||
```
|
||||
@@ -135,22 +139,33 @@ services:
|
||||
|
||||
---
|
||||
|
||||
## Compose file versions
|
||||
## Compose file structure
|
||||
|
||||
Version 1 directly has the various containers (`www`, `redis`...) at the top level of the file.
|
||||
A Compose file has multiple sections:
|
||||
|
||||
Version 2 has multiple sections:
|
||||
* `version` is mandatory. (We should use `"2"` or later; version 1 is deprecated.)
|
||||
|
||||
* `version` is mandatory and should be `"2"`.
|
||||
|
||||
* `services` is mandatory and corresponds to the content of the version 1 format.
|
||||
* `services` is mandatory. A service is one or more replicas of the same image running as containers.
|
||||
|
||||
* `networks` is optional and indicates to which networks containers should be connected.
|
||||
<br/>(By default, containers will be connected on a private, per-app network.)
|
||||
<br/>(By default, containers will be connected on a private, per-compose-file network.)
|
||||
|
||||
* `volumes` is optional and can define volumes to be used and/or shared by the containers.
|
||||
|
||||
Version 3 adds support for deployment options (scaling, rolling updates, etc.)
|
||||
---
|
||||
|
||||
## Compose file versions
|
||||
|
||||
* Version 1 is legacy and shouldn't be used.
|
||||
|
||||
(If you see a Compose file without `version` and `services`, it's a legacy v1 file.)
|
||||
|
||||
* Version 2 added support for networks and volumes.
|
||||
|
||||
* Version 3 added support for deployment options (scaling, rolling updates, etc).
|
||||
|
||||
The [Docker documentation](https://docs.docker.com/compose/compose-file/)
|
||||
has excellent information about the Compose file format if you need to know more about versions.
|
||||
|
||||
---
|
||||
|
||||
@@ -260,6 +275,8 @@ Removing trainingwheels_www_1 ... done
|
||||
Removing trainingwheels_redis_1 ... done
|
||||
```
|
||||
|
||||
Use `docker-compose down -v` to remove everything including volumes.
|
||||
|
||||
---
|
||||
|
||||
## Special handling of volumes
|
||||
207
slides/containers/Container_Engines.md
Normal file
207
slides/containers/Container_Engines.md
Normal file
@@ -0,0 +1,207 @@
|
||||
# Docker Engine and other container engines
|
||||
|
||||
* We are going to cover the architecture of the Docker Engine.
|
||||
|
||||
* We will also present other container engines.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
## Docker Engine external architecture
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Docker Engine external architecture
|
||||
|
||||
* The Engine is a daemon (service running in the background).
|
||||
|
||||
* All interaction is done through a REST API exposed over a socket.
|
||||
|
||||
* On Linux, the default socket is a UNIX socket: `/var/run/docker.sock`.
|
||||
|
||||
* We can also use a TCP socket, with optional mutual TLS authentication.
|
||||
|
||||
* The `docker` CLI communicates with the Engine over the socket.
|
||||
|
||||
Note: strictly speaking, the Docker API is not fully REST.
|
||||
|
||||
Some operations (e.g. dealing with interactive containers
|
||||
and log streaming) don't fit the REST model.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
## Docker Engine internal architecture
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Docker Engine internal architecture
|
||||
|
||||
* Up to Docker 1.10: the Docker Engine is one single monolithic binary.
|
||||
|
||||
* Starting with Docker 1.11, the Engine is split into multiple parts:
|
||||
|
||||
- `dockerd` (REST API, auth, networking, storage)
|
||||
|
||||
- `containerd` (container lifecycle, controlled over a gRPC API)
|
||||
|
||||
- `containerd-shim` (per-container; does almost nothing but allows to restart the Engine without restarting the containers)
|
||||
|
||||
- `runc` (per-container; does the actual heavy lifting to start the container)
|
||||
|
||||
* Some features (like image and snapshot management) are progressively being pushed from `dockerd` to `containerd`.
|
||||
|
||||
For more details, check [this short presentation by Phil Estes](https://www.slideshare.net/PhilEstes/diving-through-the-layers-investigating-runc-containerd-and-the-docker-engine-architecture).
|
||||
|
||||
---
|
||||
|
||||
## Other container engines
|
||||
|
||||
The following list is not exhaustive.
|
||||
|
||||
Furthermore, we limited the scope to Linux containers.
|
||||
|
||||
We can also find containers (or things that look like containers) on other platforms
|
||||
like Windows, macOS, Solaris, FreeBSD ...
|
||||
|
||||
---
|
||||
|
||||
## LXC
|
||||
|
||||
* The venerable ancestor (first released in 2008).
|
||||
|
||||
* Docker initially relied on it to execute containers.
|
||||
|
||||
* No daemon; no central API.
|
||||
|
||||
* Each container is managed by a `lxc-start` process.
|
||||
|
||||
* Each `lxc-start` process exposes a custom API over a local UNIX socket, allowing to interact with the container.
|
||||
|
||||
* No notion of image (container filesystems have to be managed manually).
|
||||
|
||||
* Networking has to be setup manually.
|
||||
|
||||
---
|
||||
|
||||
## LXD
|
||||
|
||||
* Re-uses LXC code (through liblxc).
|
||||
|
||||
* Builds on top of LXC to offer a more modern experience.
|
||||
|
||||
* Daemon exposing a REST API.
|
||||
|
||||
* Can manage images, snapshots, migrations, networking, storage.
|
||||
|
||||
* "offers a user experience similar to virtual machines but using Linux containers instead."
|
||||
|
||||
---
|
||||
|
||||
## rkt
|
||||
|
||||
* Compares to `runc`.
|
||||
|
||||
* No daemon or API.
|
||||
|
||||
* Strong emphasis on security (through privilege separation).
|
||||
|
||||
* Networking has to be setup separately (e.g. through CNI plugins).
|
||||
|
||||
* Partial image management (pull, but no push).
|
||||
|
||||
(Image build is handled by separate tools.)
|
||||
|
||||
---
|
||||
|
||||
## CRI-O
|
||||
|
||||
* Designed to be used with Kubernetes as a simple, basic runtime.
|
||||
|
||||
* Compares to `containerd`.
|
||||
|
||||
* Daemon exposing a gRPC interface.
|
||||
|
||||
* Controlled using the CRI API (Container Runtime Interface defined by Kubernetes).
|
||||
|
||||
* Needs an underlying OCI runtime (e.g. runc).
|
||||
|
||||
* Handles storage, images, networking (through CNI plugins).
|
||||
|
||||
We're not aware of anyone using it directly (i.e. outside of Kubernetes).
|
||||
|
||||
---
|
||||
|
||||
## systemd
|
||||
|
||||
* "init" system (PID 1) in most modern Linux distributions.
|
||||
|
||||
* Offers tools like `systemd-nspawn` and `machinectl` to manage containers.
|
||||
|
||||
* `systemd-nspawn` is "In many ways it is similar to chroot(1), but more powerful".
|
||||
|
||||
* `machinectl` can interact with VMs and containers managed by systemd.
|
||||
|
||||
* Exposes a DBUS API.
|
||||
|
||||
* Basic image support (tar archives and raw disk images).
|
||||
|
||||
* Network has to be setup manually.
|
||||
|
||||
---
|
||||
|
||||
## Kata containers
|
||||
|
||||
* OCI-compliant runtime.
|
||||
|
||||
* Fusion of two projects: Intel Clear Containers and Hyper runV.
|
||||
|
||||
* Run each container in a lightweight virtual machine.
|
||||
|
||||
* Requires to run on bare metal *or* with nested virtualization.
|
||||
|
||||
---
|
||||
|
||||
## gVisor
|
||||
|
||||
* OCI-compliant runtime.
|
||||
|
||||
* Implements a subset of the Linux kernel system calls.
|
||||
|
||||
* Written in go, uses a smaller subset of system calls.
|
||||
|
||||
* Can be heavily sandboxed.
|
||||
|
||||
* Can run in two modes:
|
||||
|
||||
* KVM (requires bare metal or nested virtualization),
|
||||
|
||||
* ptrace (no requirement, but slower).
|
||||
|
||||
---
|
||||
|
||||
## Overall ...
|
||||
|
||||
* The Docker Engine is very developer-centric:
|
||||
|
||||
- easy to install
|
||||
|
||||
- easy to use
|
||||
|
||||
- no manual setup
|
||||
|
||||
- first-class image build and transfer
|
||||
|
||||
* As a result, it is a fantastic tool in development environments.
|
||||
|
||||
* On servers:
|
||||
|
||||
- Docker is a good default choice
|
||||
|
||||
- If you use Kubernetes, the engine doesn't matter
|
||||
@@ -65,9 +65,17 @@ eb0eeab782f4 host host
|
||||
|
||||
* A network is managed by a *driver*.
|
||||
|
||||
* All the drivers that we have seen before are available.
|
||||
* The built-in drivers include:
|
||||
|
||||
* A new multi-host driver, *overlay*, is available out of the box.
|
||||
* `bridge` (default)
|
||||
|
||||
* `none`
|
||||
|
||||
* `host`
|
||||
|
||||
* `macvlan`
|
||||
|
||||
* A multi-host driver, *overlay*, is available out of the box (for Swarm clusters).
|
||||
|
||||
* More drivers can be provided by plugins (OVS, VLAN...)
|
||||
|
||||
@@ -75,6 +83,8 @@ eb0eeab782f4 host host
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Differences with the CNI
|
||||
|
||||
* CNI = Container Network Interface
|
||||
@@ -87,6 +97,30 @@ eb0eeab782f4 host host
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
## Single container in a Docker network
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
## Two containers on a single Docker network
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
## Two containers on two Docker networks
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Creating a network
|
||||
|
||||
Let's create a network called `dev`.
|
||||
@@ -284,7 +318,7 @@ since we wiped out the old Redis container).
|
||||
|
||||
---
|
||||
|
||||
class: x-extra-details
|
||||
class: extra-details
|
||||
|
||||
## Names are *local* to each network
|
||||
|
||||
@@ -324,7 +358,7 @@ class: extra-details
|
||||
Create the `prod` network.
|
||||
|
||||
```bash
|
||||
$ docker create network prod
|
||||
$ docker network create prod
|
||||
5a41562fecf2d8f115bedc16865f7336232a04268bdf2bd816aecca01b68d50c
|
||||
```
|
||||
|
||||
@@ -434,7 +468,7 @@ When creating a network, extra options can be provided.
|
||||
|
||||
* `--internal` disables outbound traffic (the network won't have a default gateway).
|
||||
|
||||
* `--gateway` indicates which address to use for the gateway (when utbound traffic is allowed).
|
||||
* `--gateway` indicates which address to use for the gateway (when outbound traffic is allowed).
|
||||
|
||||
* `--subnet` (in CIDR notation) indicates the subnet to use.
|
||||
|
||||
@@ -472,11 +506,13 @@ b2887adeb5578a01fd9c55c435cad56bbbe802350711d2743691f95743680b09
|
||||
|
||||
* If containers span multiple hosts, we need an *overlay* network to connect them together.
|
||||
|
||||
* Docker ships with a default network plugin, `overlay`, implementing an overlay network leveraging VXLAN.
|
||||
* Docker ships with a default network plugin, `overlay`, implementing an overlay network leveraging
|
||||
VXLAN, *enabled with Swarm Mode*.
|
||||
|
||||
* Other plugins (Weave, Calico...) can provide overlay networks as well.
|
||||
|
||||
* Once you have an overlay network, *all the features that we've used in this chapter work identically.*
|
||||
* Once you have an overlay network, *all the features that we've used in this chapter work identically
|
||||
across multiple hosts.*
|
||||
|
||||
---
|
||||
|
||||
@@ -492,7 +528,7 @@ Very short instructions:
|
||||
- `docker network create mynet --driver overlay`
|
||||
- `docker service create --network mynet myimage`
|
||||
|
||||
See http://jpetazzo.github.io/container.training for all the deets about clustering!
|
||||
See https://jpetazzo.github.io/container.training for all the deets about clustering!
|
||||
|
||||
---
|
||||
|
||||
@@ -514,13 +550,191 @@ General idea:
|
||||
|
||||
---
|
||||
|
||||
## Section summary
|
||||
## Connecting and disconnecting dynamically
|
||||
|
||||
We've learned how to:
|
||||
* So far, we have specified which network to use when starting the container.
|
||||
|
||||
* Create private networks for groups of containers.
|
||||
* The Docker Engine also allows to connect and disconnect while the container runs.
|
||||
|
||||
* Assign IP addresses to containers.
|
||||
* This feature is exposed through the Docker API, and through two Docker CLI commands:
|
||||
|
||||
* Use container naming to implement service discovery.
|
||||
* `docker network connect <network> <container>`
|
||||
|
||||
* `docker network disconnect <network> <container>`
|
||||
|
||||
---
|
||||
|
||||
## Dynamically connecting to a network
|
||||
|
||||
* We have a container named `es` connected to a network named `dev`.
|
||||
|
||||
* Let's start a simple alpine container on the default network:
|
||||
|
||||
```bash
|
||||
$ docker run -ti alpine sh
|
||||
/ #
|
||||
```
|
||||
|
||||
* In this container, try to ping the `es` container:
|
||||
|
||||
```bash
|
||||
/ # ping es
|
||||
ping: bad address 'es'
|
||||
```
|
||||
|
||||
This doesn't work, but we will change that by connecting the container.
|
||||
|
||||
---
|
||||
|
||||
## Finding the container ID and connecting it
|
||||
|
||||
* Figure out the ID of our alpine container; here are two methods:
|
||||
|
||||
* looking at `/etc/hostname` in the container,
|
||||
|
||||
* running `docker ps -lq` on the host.
|
||||
|
||||
* Run the following command on the host:
|
||||
|
||||
```bash
|
||||
$ docker network connect dev `<container_id>`
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Checking what we did
|
||||
|
||||
* Try again to `ping es` from the container.
|
||||
|
||||
* It should now work correctly:
|
||||
|
||||
```bash
|
||||
/ # ping es
|
||||
PING es (172.20.0.3): 56 data bytes
|
||||
64 bytes from 172.20.0.3: seq=0 ttl=64 time=0.376 ms
|
||||
64 bytes from 172.20.0.3: seq=1 ttl=64 time=0.130 ms
|
||||
^C
|
||||
```
|
||||
|
||||
* Interrupt it with Ctrl-C.
|
||||
|
||||
---
|
||||
|
||||
## Looking at the network setup in the container
|
||||
|
||||
We can look at the list of network interfaces with `ifconfig`, `ip a`, or `ip l`:
|
||||
|
||||
.small[
|
||||
```bash
|
||||
/ # ip a
|
||||
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
|
||||
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
|
||||
inet 127.0.0.1/8 scope host lo
|
||||
valid_lft forever preferred_lft forever
|
||||
18: eth0@if19: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
|
||||
link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff
|
||||
inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
|
||||
valid_lft forever preferred_lft forever
|
||||
20: eth1@if21: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
|
||||
link/ether 02:42:ac:14:00:04 brd ff:ff:ff:ff:ff:ff
|
||||
inet 172.20.0.4/16 brd 172.20.255.255 scope global eth1
|
||||
valid_lft forever preferred_lft forever
|
||||
/ #
|
||||
```
|
||||
]
|
||||
|
||||
Each network connection is materialized with a virtual network interface.
|
||||
|
||||
As we can see, we can be connected to multiple networks at the same time.
|
||||
|
||||
---
|
||||
|
||||
## Disconnecting from a network
|
||||
|
||||
* Let's try the symmetrical command to disconnect the container:
|
||||
```bash
|
||||
$ docker network disconnect dev <container_id>
|
||||
```
|
||||
|
||||
* From now on, if we try to ping `es`, it will not resolve:
|
||||
```bash
|
||||
/ # ping es
|
||||
ping: bad address 'es'
|
||||
```
|
||||
|
||||
* Trying to ping the IP address directly won't work either:
|
||||
```bash
|
||||
/ # ping 172.20.0.3
|
||||
... (nothing happens until we interrupt it with Ctrl-C)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Network aliases are scoped per network
|
||||
|
||||
* Each network has its own set of network aliases.
|
||||
|
||||
* We saw this earlier: `es` resolves to different addresses in `dev` and `prod`.
|
||||
|
||||
* If we are connected to multiple networks, the resolver looks up names in each of them
|
||||
(as of Docker Engine 18.03, it is the connection order) and stops as soon as the name
|
||||
is found.
|
||||
|
||||
* Therefore, if we are connected to both `dev` and `prod`, resolving `es` will **not**
|
||||
give us the addresses of all the `es` services; but only the ones in `dev` or `prod`.
|
||||
|
||||
* However, we can lookup `es.dev` or `es.prod` if we need to.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Finding out about our networks and names
|
||||
|
||||
* We can do reverse DNS lookups on containers' IP addresses.
|
||||
|
||||
* If the IP address belongs to a network (other than the default bridge), the result will be:
|
||||
|
||||
```
|
||||
name-or-first-alias-or-container-id.network-name
|
||||
```
|
||||
|
||||
* Example:
|
||||
|
||||
.small[
|
||||
```bash
|
||||
$ docker run -ti --net prod --net-alias hello alpine
|
||||
/ # apk add --no-cache drill
|
||||
...
|
||||
OK: 5 MiB in 13 packages
|
||||
/ # ifconfig
|
||||
eth0 Link encap:Ethernet HWaddr 02:42:AC:15:00:03
|
||||
inet addr:`172.21.0.3` Bcast:172.21.255.255 Mask:255.255.0.0
|
||||
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
|
||||
...
|
||||
/ # drill -t ptr `3.0.21.172`.in-addr.arpa
|
||||
...
|
||||
;; ANSWER SECTION:
|
||||
3.0.21.172.in-addr.arpa. 600 IN PTR `hello.prod`.
|
||||
...
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Building with a custom network
|
||||
|
||||
* We can build a Dockerfile with a custom network with `docker build --network NAME`.
|
||||
|
||||
* This can be used to check that a build doesn't access the network.
|
||||
|
||||
(But keep in mind that most Dockerfiles will fail,
|
||||
<br/>because they need to install remote packages and dependencies!)
|
||||
|
||||
* This may be used to access an internal package repository.
|
||||
|
||||
(But try to use a multi-stage build instead, if possible!)
|
||||
@@ -49,14 +49,14 @@ We will use `docker ps`:
|
||||
|
||||
```bash
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE ... PORTS ...
|
||||
e40ffb406c9e nginx ... 0.0.0.0:32769->80/tcp, 0.0.0.0:32768->443/tcp ...
|
||||
CONTAINER ID IMAGE ... PORTS ...
|
||||
e40ffb406c9e nginx ... 0.0.0.0:32768->80/tcp ...
|
||||
```
|
||||
|
||||
|
||||
* The web server is running on ports 80 and 443 inside the container.
|
||||
* The web server is running on port 80 inside the container.
|
||||
|
||||
* Those ports are mapped to ports 32769 and 32768 on our Docker host.
|
||||
* This port is mapped to port 32768 on our Docker host.
|
||||
|
||||
We will explain the whys and hows of this port mapping.
|
||||
|
||||
@@ -81,7 +81,7 @@ Make sure to use the right port number if it is different
|
||||
from the example below:
|
||||
|
||||
```bash
|
||||
$ curl localhost:32769
|
||||
$ curl localhost:32768
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
@@ -91,6 +91,31 @@ $ curl localhost:32769
|
||||
|
||||
---
|
||||
|
||||
## How does Docker know which port to map?
|
||||
|
||||
* There is metadata in the image telling "this image has something on port 80".
|
||||
|
||||
* We can see that metadata with `docker inspect`:
|
||||
|
||||
```bash
|
||||
$ docker inspect --format '{{.Config.ExposedPorts}}' nginx
|
||||
map[80/tcp:{}]
|
||||
```
|
||||
|
||||
* This metadata was set in the Dockerfile, with the `EXPOSE` keyword.
|
||||
|
||||
* We can see that with `docker history`:
|
||||
|
||||
```bash
|
||||
$ docker history nginx
|
||||
IMAGE CREATED CREATED BY
|
||||
7f70b30f2cc6 11 days ago /bin/sh -c #(nop) CMD ["nginx" "-g" "…
|
||||
<missing> 11 days ago /bin/sh -c #(nop) STOPSIGNAL [SIGTERM]
|
||||
<missing> 11 days ago /bin/sh -c #(nop) EXPOSE 80/tcp
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Why are we mapping ports?
|
||||
|
||||
* We are out of IPv4 addresses.
|
||||
@@ -113,7 +138,7 @@ There is a command to help us:
|
||||
|
||||
```bash
|
||||
$ docker port <containerID> 80
|
||||
32769
|
||||
32768
|
||||
```
|
||||
|
||||
---
|
||||
@@ -128,7 +153,7 @@ $ docker run -d -p 8000:80 nginx
|
||||
$ docker run -d -p 8080:80 -p 8888:80 nginx
|
||||
```
|
||||
|
||||
* We are running two NGINX web servers.
|
||||
* We are running three NGINX web servers.
|
||||
* The first one is exposed on port 80.
|
||||
* The second one is exposed on port 8000.
|
||||
* The third one is exposed on ports 8080 and 8888.
|
||||
3
slides/containers/Containers_From_Scratch.md
Normal file
3
slides/containers/Containers_From_Scratch.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Building containers from scratch
|
||||
|
||||
(This is a "bonus section" done if time permits.)
|
||||
339
slides/containers/Copy_On_Write.md
Normal file
339
slides/containers/Copy_On_Write.md
Normal file
@@ -0,0 +1,339 @@
|
||||
# Copy-on-write filesystems
|
||||
|
||||
Container engines rely on copy-on-write to be able
|
||||
to start containers quickly, regardless of their size.
|
||||
|
||||
We will explain how that works, and review some of
|
||||
the copy-on-write storage systems available on Linux.
|
||||
|
||||
---
|
||||
|
||||
## What is copy-on-write?
|
||||
|
||||
- Copy-on-write is a mechanism allowing to share data.
|
||||
|
||||
- The data appears to be a copy, but is only
|
||||
a link (or reference) to the original data.
|
||||
|
||||
- The actual copy happens only when someone
|
||||
tries to change the shared data.
|
||||
|
||||
- Whoever changes the shared data ends up
|
||||
using their own copy instead of the shared data.
|
||||
|
||||
---
|
||||
|
||||
## A few metaphors
|
||||
|
||||
--
|
||||
|
||||
- First metaphor:
|
||||
<br/>white board and tracing paper
|
||||
|
||||
--
|
||||
|
||||
- Second metaphor:
|
||||
<br/>magic books with shadowy pages
|
||||
|
||||
--
|
||||
|
||||
- Third metaphor:
|
||||
<br/>just-in-time house building
|
||||
|
||||
---
|
||||
|
||||
## Copy-on-write is *everywhere*
|
||||
|
||||
- Process creation with `fork()`.
|
||||
|
||||
- Consistent disk snapshots.
|
||||
|
||||
- Efficient VM provisioning.
|
||||
|
||||
- And, of course, containers.
|
||||
|
||||
---
|
||||
|
||||
## Copy-on-write and containers
|
||||
|
||||
Copy-on-write is essential to give us "convenient" containers.
|
||||
|
||||
- Creating a new container (from an existing image) is "free".
|
||||
|
||||
(Otherwise, we would have to copy the image first.)
|
||||
|
||||
- Customizing a container (by tweaking a few files) is cheap.
|
||||
|
||||
(Adding a 1 KB configuration file to a 1 GB container takes 1 KB, not 1 GB.)
|
||||
|
||||
- We can take snapshots, i.e. have "checkpoints" or "save points"
|
||||
when building images.
|
||||
|
||||
---
|
||||
|
||||
## AUFS overview
|
||||
|
||||
- The original (legacy) copy-on-write filesystem used by first versions of Docker.
|
||||
|
||||
- Combine multiple *branches* in a specific order.
|
||||
|
||||
- Each branch is just a normal directory.
|
||||
|
||||
- You generally have:
|
||||
|
||||
- at least one read-only branch (at the bottom),
|
||||
|
||||
- exactly one read-write branch (at the top).
|
||||
|
||||
(But other fun combinations are possible too!)
|
||||
|
||||
---
|
||||
|
||||
## AUFS operations: opening a file
|
||||
|
||||
- With `O_RDONLY` - read-only access:
|
||||
|
||||
- look it up in each branch, starting from the top
|
||||
|
||||
- open the first one we find
|
||||
|
||||
- With `O_WRONLY` or `O_RDWR` - write access:
|
||||
|
||||
- if the file exists on the top branch: open it
|
||||
|
||||
- if the file exists on another branch: "copy up"
|
||||
<br/>
|
||||
(i.e. copy the file to the top branch and open the copy)
|
||||
|
||||
- if the file doesn't exist on any branch: create it on the top branch
|
||||
|
||||
That "copy-up" operation can take a while if the file is big!
|
||||
|
||||
---
|
||||
|
||||
## AUFS operations: deleting a file
|
||||
|
||||
- A *whiteout* file is created.
|
||||
|
||||
- This is similar to the concept of "tombstones" used in some data systems.
|
||||
|
||||
```
|
||||
# docker run ubuntu rm /etc/shadow
|
||||
|
||||
# ls -la /var/lib/docker/aufs/diff/$(docker ps --no-trunc -lq)/etc
|
||||
total 8
|
||||
drwxr-xr-x 2 root root 4096 Jan 27 15:36 .
|
||||
drwxr-xr-x 5 root root 4096 Jan 27 15:36 ..
|
||||
-r--r--r-- 2 root root 0 Jan 27 15:36 .wh.shadow
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## AUFS performance
|
||||
|
||||
- AUFS `mount()` is fast, so creation of containers is quick.
|
||||
|
||||
- Read/write access has native speeds.
|
||||
|
||||
- But initial `open()` is expensive in two scenarios:
|
||||
|
||||
- when writing big files (log files, databases ...),
|
||||
|
||||
- when searching many directories (PATH, classpath, etc.) over many layers.
|
||||
|
||||
- Protip: when we built dotCloud, we ended up putting
|
||||
all important data on *volumes*.
|
||||
|
||||
- When starting the same container multiple times:
|
||||
|
||||
- the data is loaded only once from disk, and cached only once in memory;
|
||||
|
||||
- but `dentries` will be duplicated.
|
||||
|
||||
---
|
||||
|
||||
## Device Mapper
|
||||
|
||||
Device Mapper is a rich subsystem with many features.
|
||||
|
||||
It can be used for: RAID, encrypted devices, snapshots, and more.
|
||||
|
||||
In the context of containers (and Docker in particular), "Device Mapper"
|
||||
means:
|
||||
|
||||
"the Device Mapper system + its *thin provisioning target*"
|
||||
|
||||
If you see the abbreviation "thinp" it stands for "thin provisioning".
|
||||
|
||||
---
|
||||
|
||||
## Device Mapper principles
|
||||
|
||||
- Copy-on-write happens on the *block* level
|
||||
(instead of the *file* level).
|
||||
|
||||
- Each container and each image get their own block device.
|
||||
|
||||
- At any given time, it is possible to take a snapshot:
|
||||
|
||||
- of an existing container (to create a frozen image),
|
||||
|
||||
- of an existing image (to create a container from it).
|
||||
|
||||
- If a block has never been written to:
|
||||
|
||||
- it's assumed to be all zeros,
|
||||
|
||||
- it's not allocated on disk.
|
||||
|
||||
(That last property is the reason for the name "thin" provisioning.)
|
||||
|
||||
---
|
||||
|
||||
## Device Mapper operational details
|
||||
|
||||
- Two storage areas are needed:
|
||||
one for *data*, another for *metadata*.
|
||||
|
||||
- "data" is also called the "pool"; it's just a big pool of blocks.
|
||||
|
||||
(Docker uses the smallest possible block size, 64 KB.)
|
||||
|
||||
- "metadata" contains the mappings between virtual offsets (in the
|
||||
snapshots) and physical offsets (in the pool).
|
||||
|
||||
- Each time a new block (or a copy-on-write block) is written,
|
||||
a block is allocated from the pool.
|
||||
|
||||
- When there are no more blocks in the pool, attempts to write
|
||||
will stall until the pool is increased (or the write operation
|
||||
aborted).
|
||||
|
||||
- In other words: when running out of space, containers are
|
||||
frozen, but operations will resume as soon as space is available.
|
||||
|
||||
---
|
||||
|
||||
## Device Mapper performance
|
||||
|
||||
- By default, Docker puts data and metadata on a loop device
|
||||
backed by a sparse file.
|
||||
|
||||
- This is great from a usability point of view,
|
||||
since zero configuration is needed.
|
||||
|
||||
- But it is terrible from a performance point of view:
|
||||
|
||||
- each time a container writes to a new block,
|
||||
- a block has to be allocated from the pool,
|
||||
- and when it's written to,
|
||||
- a block has to be allocated from the sparse file,
|
||||
- and sparse file performance isn't great anyway.
|
||||
|
||||
- If you use Device Mapper, make sure to put data (and metadata)
|
||||
on devices!
|
||||
|
||||
---
|
||||
|
||||
## BTRFS principles
|
||||
|
||||
- BTRFS is a filesystem (like EXT4, XFS, NTFS...) with built-in snapshots.
|
||||
|
||||
- The "copy-on-write" happens at the filesystem level.
|
||||
|
||||
- BTRFS integrates the snapshot and block pool management features
|
||||
at the filesystem level.
|
||||
|
||||
(Instead of the block level for Device Mapper.)
|
||||
|
||||
- In practice, we create a "subvolume" and
|
||||
later take a "snapshot" of that subvolume.
|
||||
|
||||
Imagine: `mkdir` with Super Powers and `cp -a` with Super Powers.
|
||||
|
||||
- These operations can be executed with the `btrfs` CLI tool.
|
||||
|
||||
---
|
||||
|
||||
## BTRFS in practice with Docker
|
||||
|
||||
- Docker can use BTRFS and its snapshotting features to store container images.
|
||||
|
||||
- The only requirement is that `/var/lib/docker` is on a BTRFS filesystem.
|
||||
|
||||
(Or, the directory specified with the `--data-root` flag when starting the engine.)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## BTRFS quirks
|
||||
|
||||
- BTRFS works by dividing its storage in *chunks*.
|
||||
|
||||
- A chunk can contain data or metadata.
|
||||
|
||||
- You can run out of chunks (and get `No space left on device`)
|
||||
even though `df` shows space available.
|
||||
|
||||
(Because chunks are only partially allocated.)
|
||||
|
||||
- Quick fix:
|
||||
|
||||
```
|
||||
# btrfs filesys balance start -dusage=1 /var/lib/docker
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Overlay2
|
||||
|
||||
- Overlay2 is very similar to AUFS.
|
||||
|
||||
- However, it has been merged in "upstream" kernel.
|
||||
|
||||
- It is therefore available on all modern kernels.
|
||||
|
||||
(AUFS was available on Debian and Ubuntu, but required custom kernels on other distros.)
|
||||
|
||||
- It is simpler than AUFS (it can only have two branches, called "layers").
|
||||
|
||||
- The container engine abstracts this detail, so this is not a concern.
|
||||
|
||||
- Overlay2 storage drivers generally use hard links between layers.
|
||||
|
||||
- This improves `stat()` and `open()` performance, at the expense of inode usage.
|
||||
|
||||
---
|
||||
|
||||
## ZFS
|
||||
|
||||
- ZFS is similar to BTRFS (at least from a container user's perspective).
|
||||
|
||||
- Pros:
|
||||
|
||||
- high performance
|
||||
- high reliability (with e.g. data checksums)
|
||||
- optional data compression and deduplication
|
||||
|
||||
- Cons:
|
||||
|
||||
- high memory usage
|
||||
- not in upstream kernel
|
||||
|
||||
- It is available as a kernel module or through FUSE.
|
||||
|
||||
---
|
||||
|
||||
## Which one is the best?
|
||||
|
||||
- Eventually, overlay2 should be the best option.
|
||||
|
||||
- It is available on all modern systems.
|
||||
|
||||
- Its memory usage is better than Device Mapper, BTRFS, or ZFS.
|
||||
|
||||
- The remarks about *write performance* shouldn't bother you:
|
||||
<br/>
|
||||
data should always be stored in volumes anyway!
|
||||
|
||||
@@ -64,7 +64,7 @@ Create this Dockerfile.
|
||||
|
||||
## Testing our C program
|
||||
|
||||
* Create `hello.c` and `Dockerfile` in the same direcotry.
|
||||
* Create `hello.c` and `Dockerfile` in the same directory.
|
||||
|
||||
* Run `docker build -t hello .` in this directory.
|
||||
|
||||
@@ -93,7 +93,7 @@ Success!
|
||||
* Older Dockerfiles also have the `ADD` instruction.
|
||||
<br/>It is similar but can automatically extract archives.
|
||||
|
||||
* If we really wanted to compile C code in a compiler, we would:
|
||||
* If we really wanted to compile C code in a container, we would:
|
||||
|
||||
* Place it in a different directory, with the `WORKDIR` instruction.
|
||||
|
||||
@@ -10,10 +10,12 @@
|
||||
|
||||
* [Solaris Containers (2004)](https://en.wikipedia.org/wiki/Solaris_Containers)
|
||||
|
||||
* [FreeBSD jails (1999)](https://www.freebsd.org/cgi/man.cgi?query=jail&sektion=8&manpath=FreeBSD+4.0-RELEASE)
|
||||
* [FreeBSD jails (1999-2000)](https://www.freebsd.org/cgi/man.cgi?query=jail&sektion=8&manpath=FreeBSD+4.0-RELEASE)
|
||||
|
||||
Containers have been around for a *very long time* indeed.
|
||||
|
||||
(See [this excellent blog post by Serge Hallyn](https://s3hh.wordpress.com/2018/03/22/history-of-containers/) for more historic details.)
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
81
slides/containers/Docker_Machine.md
Normal file
81
slides/containers/Docker_Machine.md
Normal file
@@ -0,0 +1,81 @@
|
||||
# Managing hosts with Docker Machine
|
||||
|
||||
- Docker Machine is a tool to provision and manage Docker hosts.
|
||||
|
||||
- It automates the creation of a virtual machine:
|
||||
|
||||
- locally, with a tool like VirtualBox or VMware;
|
||||
|
||||
- on a public cloud like AWS EC2, Azure, Digital Ocean, GCP, etc.;
|
||||
|
||||
- on a private cloud like OpenStack.
|
||||
|
||||
- It can also configure existing machines through an SSH connection.
|
||||
|
||||
- It can manage as many hosts as you want, with as many "drivers" as you want.
|
||||
|
||||
---
|
||||
|
||||
## Docker Machine workflow
|
||||
|
||||
1) Prepare the environment: set up VirtualBox, obtain cloud credentials ...
|
||||
|
||||
2) Create hosts with `docker-machine create -d drivername machinename`.
|
||||
|
||||
3) Use a specific machine with `eval $(docker-machine env machinename)`.
|
||||
|
||||
4) Profit!
|
||||
|
||||
---
|
||||
|
||||
## Environment variables
|
||||
|
||||
- Most of the tools (CLI, libraries...) connecting to the Docker API can use environment variables.
|
||||
|
||||
- These variables are:
|
||||
|
||||
- `DOCKER_HOST` (indicates address+port to connect to, or path of UNIX socket)
|
||||
|
||||
- `DOCKER_TLS_VERIFY` (indicates that TLS mutual auth should be used)
|
||||
|
||||
- `DOCKER_CERT_PATH` (path to the keypair and certificate to use for auth)
|
||||
|
||||
- `docker-machine env ...` will generate the variables needed to connect to a host.
|
||||
|
||||
- `eval $(docker-machine env ...)` sets these variables in the current shell.
|
||||
|
||||
---
|
||||
|
||||
## Host management features
|
||||
|
||||
With `docker-machine`, we can:
|
||||
|
||||
- upgrade a host to the latest version of the Docker Engine,
|
||||
|
||||
- start/stop/restart hosts,
|
||||
|
||||
- get a shell on a remote machine (with SSH),
|
||||
|
||||
- copy files to/from remote machines (with SCP),
|
||||
|
||||
- mount a remote host's directory on the local machine (with SSHFS),
|
||||
|
||||
- ...
|
||||
|
||||
---
|
||||
|
||||
## The `generic` driver
|
||||
|
||||
When provisioning a new host, `docker-machine` executes these steps:
|
||||
|
||||
1) Create the host using a cloud or hypervisor API.
|
||||
|
||||
2) Connect to the host over SSH.
|
||||
|
||||
3) Install and configure Docker on the host.
|
||||
|
||||
With the `generic` driver, we provide the IP address of an existing host
|
||||
(instead of e.g. cloud credentials) and we omit the first step.
|
||||
|
||||
This allows us to provision physical machines, or VMs provided by a 3rd
|
||||
party, or use a cloud for which we don't have a provisioning API.
|
||||
@@ -72,7 +72,7 @@ class: pic
|
||||
|
||||
class: pic
|
||||
|
||||
## The parallel with the shipping indsutry
|
||||
## The parallel with the shipping industry
|
||||
|
||||

|
||||
|
||||
361
slides/containers/Dockerfile_Tips.md
Normal file
361
slides/containers/Dockerfile_Tips.md
Normal file
@@ -0,0 +1,361 @@
|
||||
# Tips for efficient Dockerfiles
|
||||
|
||||
We will see how to:
|
||||
|
||||
* Reduce the number of layers.
|
||||
|
||||
* Leverage the build cache so that builds can be faster.
|
||||
|
||||
* Embed unit testing in the build process.
|
||||
|
||||
---
|
||||
|
||||
## Reducing the number of layers
|
||||
|
||||
* Each line in a `Dockerfile` creates a new layer.
|
||||
|
||||
* Build your `Dockerfile` to take advantage of Docker's caching system.
|
||||
|
||||
* Combine commands by using `&&` to continue commands and `\` to wrap lines.
|
||||
|
||||
Note: it is frequent to build a Dockerfile line by line:
|
||||
|
||||
```dockerfile
|
||||
RUN apt-get install thisthing
|
||||
RUN apt-get install andthatthing andthatotherone
|
||||
RUN apt-get install somemorestuff
|
||||
```
|
||||
|
||||
And then refactor it trivially before shipping:
|
||||
|
||||
```dockerfile
|
||||
RUN apt-get install thisthing andthatthing andthatotherone somemorestuff
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Avoid re-installing dependencies at each build
|
||||
|
||||
* Classic Dockerfile problem:
|
||||
|
||||
"each time I change a line of code, all my dependencies are re-installed!"
|
||||
|
||||
* Solution: `COPY` dependency lists (`package.json`, `requirements.txt`, etc.)
|
||||
by themselves to avoid reinstalling unchanged dependencies every time.
|
||||
|
||||
---
|
||||
|
||||
## Example "bad" `Dockerfile`
|
||||
|
||||
The dependencies are reinstalled every time, because the build system does not know if `requirements.txt` has been updated.
|
||||
|
||||
```dockerfile
|
||||
FROM python
|
||||
WORKDIR /src
|
||||
COPY . .
|
||||
RUN pip install -qr requirements.txt
|
||||
EXPOSE 5000
|
||||
CMD ["python", "app.py"]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Fixed `Dockerfile`
|
||||
|
||||
Adding the dependencies as a separate step means that Docker can cache more efficiently and only install them when `requirements.txt` changes.
|
||||
|
||||
```dockerfile
|
||||
FROM python
|
||||
COPY requirements.txt /tmp/requirements.txt
|
||||
RUN pip install -qr /tmp/requirements.txt
|
||||
WORKDIR /src
|
||||
COPY . .
|
||||
EXPOSE 5000
|
||||
CMD ["python", "app.py"]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Embedding unit tests in the build process
|
||||
|
||||
```dockerfile
|
||||
FROM <baseimage>
|
||||
RUN <install dependencies>
|
||||
COPY <code>
|
||||
RUN <build code>
|
||||
RUN <install test dependencies>
|
||||
COPY <test data sets and fixtures>
|
||||
RUN <unit tests>
|
||||
FROM <baseimage>
|
||||
RUN <install dependencies>
|
||||
COPY <code>
|
||||
RUN <build code>
|
||||
CMD, EXPOSE ...
|
||||
```
|
||||
|
||||
* The build fails as soon as an instruction fails
|
||||
* If `RUN <unit tests>` fails, the build doesn't produce an image
|
||||
* If it succeeds, it produces a clean image (without test libraries and data)
|
||||
|
||||
---
|
||||
|
||||
# Dockerfile examples
|
||||
|
||||
There are a number of tips, tricks, and techniques that we can use in Dockerfiles.
|
||||
|
||||
But sometimes, we have to use different (and even opposed) practices depending on:
|
||||
|
||||
- the complexity of our project,
|
||||
|
||||
- the programming language or framework that we are using,
|
||||
|
||||
- the stage of our project (early MVP vs. super-stable production),
|
||||
|
||||
- whether we're building a final image or a base for further images,
|
||||
|
||||
- etc.
|
||||
|
||||
We are going to show a few examples using very different techniques.
|
||||
|
||||
---
|
||||
|
||||
## When to optimize an image
|
||||
|
||||
When authoring official images, it is a good idea to reduce as much as possible:
|
||||
|
||||
- the number of layers,
|
||||
|
||||
- the size of the final image.
|
||||
|
||||
This is often done at the expense of build time and convenience for the image maintainer;
|
||||
but when an image is downloaded millions of times, saving even a few seconds of pull time
|
||||
can be worth it.
|
||||
|
||||
.small[
|
||||
```dockerfile
|
||||
RUN apt-get update && apt-get install -y libpng12-dev libjpeg-dev && rm -rf /var/lib/apt/lists/* \
|
||||
&& docker-php-ext-configure gd --with-png-dir=/usr --with-jpeg-dir=/usr \
|
||||
&& docker-php-ext-install gd
|
||||
...
|
||||
RUN curl -o wordpress.tar.gz -SL https://wordpress.org/wordpress-${WORDPRESS_UPSTREAM_VERSION}.tar.gz \
|
||||
&& echo "$WORDPRESS_SHA1 *wordpress.tar.gz" | sha1sum -c - \
|
||||
&& tar -xzf wordpress.tar.gz -C /usr/src/ \
|
||||
&& rm wordpress.tar.gz \
|
||||
&& chown -R www-data:www-data /usr/src/wordpress
|
||||
```
|
||||
]
|
||||
|
||||
(Source: [Wordpress official image](https://github.com/docker-library/wordpress/blob/618490d4bdff6c5774b84b717979bfe3d6ba8ad1/apache/Dockerfile))
|
||||
|
||||
---
|
||||
|
||||
## When to *not* optimize an image
|
||||
|
||||
Sometimes, it is better to prioritize *maintainer convenience*.
|
||||
|
||||
In particular, if:
|
||||
|
||||
- the image changes a lot,
|
||||
|
||||
- the image has very few users (e.g. only 1, the maintainer!),
|
||||
|
||||
- the image is built and run on the same machine,
|
||||
|
||||
- the image is built and run on machines with a very fast link ...
|
||||
|
||||
In these cases, just keep things simple!
|
||||
|
||||
(Next slide: a Dockerfile that can be used to preview a Jekyll / github pages site.)
|
||||
|
||||
---
|
||||
|
||||
```dockerfile
|
||||
FROM debian:sid
|
||||
|
||||
RUN apt-get update -q
|
||||
RUN apt-get install -yq build-essential make
|
||||
RUN apt-get install -yq zlib1g-dev
|
||||
RUN apt-get install -yq ruby ruby-dev
|
||||
RUN apt-get install -yq python-pygments
|
||||
RUN apt-get install -yq nodejs
|
||||
RUN apt-get install -yq cmake
|
||||
RUN gem install --no-rdoc --no-ri github-pages
|
||||
|
||||
COPY . /blog
|
||||
WORKDIR /blog
|
||||
|
||||
VOLUME /blog/_site
|
||||
|
||||
EXPOSE 4000
|
||||
CMD ["jekyll", "serve", "--host", "0.0.0.0", "--incremental"]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Multi-dimensional versioning systems
|
||||
|
||||
Images can have a tag, indicating the version of the image.
|
||||
|
||||
But sometimes, there are multiple important components, and we need to indicate the versions
|
||||
for all of them.
|
||||
|
||||
This can be done with environment variables:
|
||||
|
||||
```dockerfile
|
||||
ENV PIP=9.0.3 \
|
||||
ZC_BUILDOUT=2.11.2 \
|
||||
SETUPTOOLS=38.7.0 \
|
||||
PLONE_MAJOR=5.1 \
|
||||
PLONE_VERSION=5.1.0 \
|
||||
PLONE_MD5=76dc6cfc1c749d763c32fff3a9870d8d
|
||||
```
|
||||
|
||||
(Source: [Plone official image](https://github.com/plone/plone.docker/blob/master/5.1/5.1.0/alpine/Dockerfile))
|
||||
|
||||
---
|
||||
|
||||
## Entrypoints and wrappers
|
||||
|
||||
It is very common to define a custom entrypoint.
|
||||
|
||||
That entrypoint will generally be a script, performing any combination of:
|
||||
|
||||
- pre-flight checks (if a required dependency is not available, display
|
||||
a nice error message early instead of an obscure one in a deep log file),
|
||||
|
||||
- generation or validation of configuration files,
|
||||
|
||||
- dropping privileges (with e.g. `su` or `gosu`, sometimes combined with `chown`),
|
||||
|
||||
- and more.
|
||||
|
||||
---
|
||||
|
||||
## A typical entrypoint script
|
||||
|
||||
```bash
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
# first arg is '-f' or '--some-option'
|
||||
# or first arg is 'something.conf'
|
||||
if [ "${1#-}" != "$1" ] || [ "${1%.conf}" != "$1" ]; then
|
||||
set -- redis-server "$@"
|
||||
fi
|
||||
|
||||
# allow the container to be started with '--user'
|
||||
if [ "$1" = 'redis-server' -a "$(id -u)" = '0' ]; then
|
||||
chown -R redis .
|
||||
exec su-exec redis "$0" "$@"
|
||||
fi
|
||||
|
||||
exec "$@"
|
||||
```
|
||||
|
||||
(Source: [Redis official image](https://github.com/docker-library/redis/blob/d24f2be82673ccef6957210cc985e392ebdc65e4/4.0/alpine/docker-entrypoint.sh))
|
||||
|
||||
---
|
||||
|
||||
## Factoring information
|
||||
|
||||
To facilitate maintenance (and avoid human errors), avoid repeating information like:
|
||||
|
||||
- version numbers,
|
||||
|
||||
- remote asset URLs (e.g. source tarballs) ...
|
||||
|
||||
Instead, use environment variables.
|
||||
|
||||
.small[
|
||||
```dockerfile
|
||||
ENV NODE_VERSION 10.2.1
|
||||
...
|
||||
RUN ...
|
||||
&& curl -fsSLO --compressed "https://nodejs.org/dist/v$NODE_VERSION/node-v$NODE_VERSION.tar.xz" \
|
||||
&& curl -fsSLO --compressed "https://nodejs.org/dist/v$NODE_VERSION/SHASUMS256.txt.asc" \
|
||||
&& gpg --batch --decrypt --output SHASUMS256.txt SHASUMS256.txt.asc \
|
||||
&& grep " node-v$NODE_VERSION.tar.xz\$" SHASUMS256.txt | sha256sum -c - \
|
||||
&& tar -xf "node-v$NODE_VERSION.tar.xz" \
|
||||
&& cd "node-v$NODE_VERSION" \
|
||||
...
|
||||
```
|
||||
]
|
||||
|
||||
(Source: [Nodejs official image](https://github.com/nodejs/docker-node/blob/master/10/alpine/Dockerfile))
|
||||
|
||||
---
|
||||
|
||||
## Overrides
|
||||
|
||||
In theory, development and production images should be the same.
|
||||
|
||||
In practice, we often need to enable specific behaviors in development (e.g. debug statements).
|
||||
|
||||
One way to reconcile both needs is to use Compose to enable these behaviors.
|
||||
|
||||
Let's look at the [trainingwheels](https://github.com/jpetazzo/trainingwheels) demo app for an example.
|
||||
|
||||
---
|
||||
|
||||
## Production image
|
||||
|
||||
This Dockerfile builds an image leveraging gunicorn:
|
||||
|
||||
```dockerfile
|
||||
FROM python
|
||||
RUN pip install flask
|
||||
RUN pip install gunicorn
|
||||
RUN pip install redis
|
||||
COPY . /src
|
||||
WORKDIR /src
|
||||
CMD gunicorn --bind 0.0.0.0:5000 --workers 10 counter:app
|
||||
EXPOSE 5000
|
||||
```
|
||||
|
||||
(Source: [trainingwheels Dockerfile](https://github.com/jpetazzo/trainingwheels/blob/master/www/Dockerfile))
|
||||
|
||||
---
|
||||
|
||||
## Development Compose file
|
||||
|
||||
This Compose file uses the same image, but with a few overrides for development:
|
||||
|
||||
- the Flask development server is used (overriding `CMD`),
|
||||
|
||||
- the `DEBUG` environment variable is set,
|
||||
|
||||
- a volume is used to provide a faster local development workflow.
|
||||
|
||||
.small[
|
||||
```yaml
|
||||
services:
|
||||
www:
|
||||
build: www
|
||||
ports:
|
||||
- 8000:5000
|
||||
user: nobody
|
||||
environment:
|
||||
DEBUG: 1
|
||||
command: python counter.py
|
||||
volumes:
|
||||
- ./www:/src
|
||||
```
|
||||
]
|
||||
|
||||
(Source: [trainingwheels Compose file](https://github.com/jpetazzo/trainingwheels/blob/master/docker-compose.yml))
|
||||
|
||||
---
|
||||
|
||||
## How to know which best practices are better?
|
||||
|
||||
- The main goal of containers is to make our lives easier.
|
||||
|
||||
- In this chapter, we showed many ways to write Dockerfiles.
|
||||
|
||||
- These Dockerfiles sometimes use diametrically opposed techniques.
|
||||
|
||||
- Yet, they were the "right" ones *for a specific situation.*
|
||||
|
||||
- It's OK (and even encouraged) to start simple and evolve as needed.
|
||||
|
||||
- Feel free to review this chapter later (after writing a few Dockerfiles) for inspiration!
|
||||
173
slides/containers/Ecosystem.md
Normal file
173
slides/containers/Ecosystem.md
Normal file
@@ -0,0 +1,173 @@
|
||||
# The container ecosystem
|
||||
|
||||
In this chapter, we will talk about a few actors of the container ecosystem.
|
||||
|
||||
We have (arbitrarily) decided to focus on two groups:
|
||||
|
||||
- the Docker ecosystem,
|
||||
|
||||
- the Cloud Native Computing Foundation (CNCF) and its projects.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
## The Docker ecosystem
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Moby vs. Docker
|
||||
|
||||
- Docker Inc. (the company) started Docker (the open source project).
|
||||
|
||||
- At some point, it became necessary to differentiate between:
|
||||
|
||||
- the open source project (code base, contributors...),
|
||||
|
||||
- the product that we use to run containers (the engine),
|
||||
|
||||
- the platform that we use to manage containerized applications,
|
||||
|
||||
- the brand.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Exercise in brand management
|
||||
|
||||
Questions:
|
||||
|
||||
--
|
||||
|
||||
- What is the brand of the car on the previous slide?
|
||||
|
||||
--
|
||||
|
||||
- What kind of engine does it have?
|
||||
|
||||
--
|
||||
|
||||
- Would you say that it's a safe or unsafe car?
|
||||
|
||||
--
|
||||
|
||||
- Harder question: can you drive from the US West to East coasts with it?
|
||||
|
||||
--
|
||||
|
||||
The answers to these questions are part of the Tesla brand.
|
||||
|
||||
---
|
||||
|
||||
## What if ...
|
||||
|
||||
- The blueprints for Tesla cars were available for free.
|
||||
|
||||
- You could legally build your own Tesla.
|
||||
|
||||
- You were allowed to customize it entirely.
|
||||
|
||||
(Put a combustion engine, drive it with a game pad ...)
|
||||
|
||||
- You could even sell the customized versions.
|
||||
|
||||
--
|
||||
|
||||
- ... And call your customized version "Tesla".
|
||||
|
||||
--
|
||||
|
||||
Would we give the same answers to the questions on the previous slide?
|
||||
|
||||
---
|
||||
|
||||
## From Docker to Moby
|
||||
|
||||
- Docker Inc. decided to split the brand.
|
||||
|
||||
- Moby is the open source project.
|
||||
|
||||
(= Components and libraries that you can use, reuse, customize, sell ...)
|
||||
|
||||
- Docker is the product.
|
||||
|
||||
(= Software that you can use, buy support contracts ...)
|
||||
|
||||
- Docker is made with Moby.
|
||||
|
||||
- When Docker Inc. improves the Docker products, it improves Moby.
|
||||
|
||||
(And vice versa.)
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Other examples
|
||||
|
||||
- *Read the Docs* is an open source project to generate and host documentation.
|
||||
|
||||
- You can host it yourself (on your own servers).
|
||||
|
||||
- You can also get hosted on readthedocs.org.
|
||||
|
||||
- The maintainers of the open source project often receive
|
||||
support requests from users of the hosted product ...
|
||||
|
||||
- ... And the maintainers of the hosted product often
|
||||
receive support requests from users of self-hosted instances.
|
||||
|
||||
- Another example:
|
||||
|
||||
*WordPress.com is a blogging platform that is owned and hosted online by
|
||||
Automattic. It is run on WordPress, an open source piece of software used by
|
||||
bloggers. (Wikipedia)*
|
||||
|
||||
---
|
||||
|
||||
## Docker CE vs Docker EE
|
||||
|
||||
- Docker CE = Community Edition.
|
||||
|
||||
- Available on most Linux distros, Mac, Windows.
|
||||
|
||||
- Optimized for developers and ease of use.
|
||||
|
||||
- Docker EE = Enterprise Edition.
|
||||
|
||||
- Available only on a subset of Linux distros + Windows servers.
|
||||
|
||||
(Only available when there is a strong partnership to offer enterprise-class support.)
|
||||
|
||||
- Optimized for production use.
|
||||
|
||||
- Comes with additional components: security scanning, RBAC ...
|
||||
|
||||
---
|
||||
|
||||
## The CNCF
|
||||
|
||||
- Non-profit, part of the Linux Foundation; founded in December 2015.
|
||||
|
||||
*The Cloud Native Computing Foundation builds sustainable ecosystems and fosters
|
||||
a community around a constellation of high-quality projects that orchestrate
|
||||
containers as part of a microservices architecture.*
|
||||
|
||||
*CNCF is an open source software foundation dedicated to making cloud-native computing universal and sustainable.*
|
||||
|
||||
- Home of Kubernetes (and many other projects now).
|
||||
|
||||
- Funded by corporate memberships.
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
@@ -110,6 +110,8 @@ Beautiful! .emoji[😍]
|
||||
|
||||
---
|
||||
|
||||
class: in-person
|
||||
|
||||
## Counting packages in the container
|
||||
|
||||
Let's check how many packages are installed there.
|
||||
@@ -127,6 +129,8 @@ How many packages do we have on our host?
|
||||
|
||||
---
|
||||
|
||||
class: in-person
|
||||
|
||||
## Counting packages on the host
|
||||
|
||||
Exit the container by logging out of the shell, like you would usually do.
|
||||
@@ -145,18 +149,34 @@ Now, try to:
|
||||
|
||||
---
|
||||
|
||||
class: self-paced
|
||||
|
||||
## Comparing the container and the host
|
||||
|
||||
Exit the container by logging out of the shell, with `^D` or `exit`.
|
||||
|
||||
Now try to run `figlet`. Does that work?
|
||||
|
||||
(It shouldn't; except if, by coincidence, you are running on a machine where figlet was installed before.)
|
||||
|
||||
---
|
||||
|
||||
## Host and containers are independent things
|
||||
|
||||
* We ran an `ubuntu` container on an `ubuntu` host.
|
||||
* We ran an `ubuntu` container on a Linux/Windows/macOS host.
|
||||
|
||||
* But they have different, independent packages.
|
||||
* They have different, independent packages.
|
||||
|
||||
* Installing something on the host doesn't expose it to the container.
|
||||
|
||||
* And vice-versa.
|
||||
|
||||
* Even if both the host and the container have the same Linux distro!
|
||||
|
||||
* We can run *any container* on *any host*.
|
||||
|
||||
(One exception: Windows containers cannot run on Linux machines; at least not yet.)
|
||||
|
||||
---
|
||||
|
||||
## Where's our container?
|
||||
228
slides/containers/Getting_Inside.md
Normal file
228
slides/containers/Getting_Inside.md
Normal file
@@ -0,0 +1,228 @@
|
||||
|
||||
class: title
|
||||
|
||||
# Getting inside a container
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Objectives
|
||||
|
||||
On a traditional server or VM, we sometimes need to:
|
||||
|
||||
* log into the machine (with SSH or on the console),
|
||||
|
||||
* analyze the disks (by removing them or rebooting with a rescue system).
|
||||
|
||||
In this chapter, we will see how to do that with containers.
|
||||
|
||||
---
|
||||
|
||||
## Getting a shell
|
||||
|
||||
Every once in a while, we want to log into a machine.
|
||||
|
||||
In a perfect world, this shouldn't be necessary.
|
||||
|
||||
* You need to install or update packages (and their configuration)?
|
||||
|
||||
Use configuration management. (e.g. Ansible, Chef, Puppet, Salt...)
|
||||
|
||||
* You need to view logs and metrics?
|
||||
|
||||
Collect and access them through a centralized platform.
|
||||
|
||||
In the real world, though ... we often need shell access!
|
||||
|
||||
---
|
||||
|
||||
## Not getting a shell
|
||||
|
||||
Even without a perfect deployment system, we can do many operations without getting a shell.
|
||||
|
||||
* Installing packages can (and should) be done in the container image.
|
||||
|
||||
* Configuration can be done at the image level, or when the container starts.
|
||||
|
||||
* Dynamic configuration can be stored in a volume (shared with another container).
|
||||
|
||||
* Logs written to stdout are automatically collected by the Docker Engine.
|
||||
|
||||
* Other logs can be written to a shared volume.
|
||||
|
||||
* Process information and metrics are visible from the host.
|
||||
|
||||
_Let's save logging, volumes ... for later, but let's have a look at process information!_
|
||||
|
||||
---
|
||||
|
||||
## Viewing container processes from the host
|
||||
|
||||
If you run Docker on Linux, container processes are visible on the host.
|
||||
|
||||
```bash
|
||||
$ ps faux | less
|
||||
```
|
||||
|
||||
* Scroll around the output of this command.
|
||||
|
||||
* You should see the `jpetazzo/clock` container.
|
||||
|
||||
* A containerized process is just like any other process on the host.
|
||||
|
||||
* We can use tools like `lsof`, `strace`, `gdb` ... to analyze them.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## What's the difference between a container process and a host process?
|
||||
|
||||
* Each process (containerized or not) belongs to *namespaces* and *cgroups*.
|
||||
|
||||
* The namespaces and cgroups determine what a process can "see" and "do".
|
||||
|
||||
* Analogy: each process (containerized or not) runs with a specific UID (user ID).
|
||||
|
||||
* UID=0 is root, and has elevated privileges. Other UIDs are normal users.
|
||||
|
||||
_We will give more details about namespaces and cgroups later._
|
||||
|
||||
---
|
||||
|
||||
## Getting a shell in a running container
|
||||
|
||||
* Sometimes, we need to get a shell anyway.
|
||||
|
||||
* We _could_ run some SSH server in the container ...
|
||||
|
||||
* But it is easier to use `docker exec`.
|
||||
|
||||
```bash
|
||||
$ docker exec -ti ticktock sh
|
||||
```
|
||||
|
||||
* This creates a new process (running `sh`) _inside_ the container.
|
||||
|
||||
* This can also be done "manually" with the tool `nsenter`.
|
||||
|
||||
---
|
||||
|
||||
## Caveats
|
||||
|
||||
* The tool that you want to run needs to exist in the container.
|
||||
|
||||
* Some tools (like `ip netns exec`) let you attach to _one_ namespace at a time.
|
||||
|
||||
(This lets you e.g. setup network interfaces, even if you don't have `ifconfig` or `ip` in the container.)
|
||||
|
||||
* Most importantly: the container needs to be running.
|
||||
|
||||
* What if the container is stopped or crashed?
|
||||
|
||||
---
|
||||
|
||||
## Getting a shell in a stopped container
|
||||
|
||||
* A stopped container is only _storage_ (like a disk drive).
|
||||
|
||||
* We cannot SSH into a disk drive or USB stick!
|
||||
|
||||
* We need to connect the disk to a running machine.
|
||||
|
||||
* How does that translate into the container world?
|
||||
|
||||
---
|
||||
|
||||
## Analyzing a stopped container
|
||||
|
||||
As an exercise, we are going to try to find out what's wrong with `jpetazzo/crashtest`.
|
||||
|
||||
```bash
|
||||
docker run jpetazzo/crashtest
|
||||
```
|
||||
|
||||
The container starts, but then stops immediately, without any output.
|
||||
|
||||
What would MacGyver™ do?
|
||||
|
||||
First, let's check the status of that container.
|
||||
|
||||
```bash
|
||||
docker ps -l
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Viewing filesystem changes
|
||||
|
||||
* We can use `docker diff` to see files that were added / changed / removed.
|
||||
|
||||
```bash
|
||||
docker diff <container_id>
|
||||
```
|
||||
|
||||
* The container ID was shown by `docker ps -l`.
|
||||
|
||||
* We can also see it with `docker ps -lq`.
|
||||
|
||||
* The output of `docker diff` shows some interesting log files!
|
||||
|
||||
---
|
||||
|
||||
## Accessing files
|
||||
|
||||
* We can extract files with `docker cp`.
|
||||
|
||||
```bash
|
||||
docker cp <container_id>:/var/log/nginx/error.log .
|
||||
```
|
||||
|
||||
* Then we can look at that log file.
|
||||
|
||||
```bash
|
||||
cat error.log
|
||||
```
|
||||
|
||||
(The directory `/run/nginx` doesn't exist.)
|
||||
|
||||
---
|
||||
|
||||
## Exploring a crashed container
|
||||
|
||||
* We can restart a container with `docker start` ...
|
||||
|
||||
* ... But it will probably crash again immediately!
|
||||
|
||||
* We cannot specify a different program to run with `docker start`
|
||||
|
||||
* But we can create a new image from the crashed container
|
||||
|
||||
```bash
|
||||
docker commit <container_id> debugimage
|
||||
```
|
||||
|
||||
* Then we can run a new container from that image, with a custom entrypoint
|
||||
|
||||
```bash
|
||||
docker run -ti --entrypoint sh debugimage
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Obtaining a complete dump
|
||||
|
||||
* We can also dump the entire filesystem of a container.
|
||||
|
||||
* This is done with `docker export`.
|
||||
|
||||
* It generates a tar archive.
|
||||
|
||||
```bash
|
||||
docker export <container_id> | tar tv
|
||||
```
|
||||
|
||||
This will give a detailed listing of the content of the container.
|
||||
@@ -46,6 +46,8 @@ In this section, we will explain:
|
||||
|
||||
## Example for a Java webapp
|
||||
|
||||
Each of the following items will correspond to one layer:
|
||||
|
||||
* CentOS base layer
|
||||
* Packages and configuration files added by our local IT
|
||||
* JRE
|
||||
@@ -56,6 +58,14 @@ In this section, we will explain:
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
## The read-write layer
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Differences between containers and images
|
||||
|
||||
* An image is a read-only filesystem.
|
||||
@@ -63,24 +73,22 @@ In this section, we will explain:
|
||||
* A container is an encapsulated set of processes running in a
|
||||
read-write copy of that filesystem.
|
||||
|
||||
* To optimize container boot time, *copy-on-write* is used
|
||||
* To optimize container boot time, *copy-on-write* is used
|
||||
instead of regular copy.
|
||||
|
||||
* `docker run` starts a container from a given image.
|
||||
|
||||
Let's give a couple of metaphors to illustrate those concepts.
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||
## Multiple containers sharing the same image
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Image as stencils
|
||||
|
||||
Images are like templates or stencils that you can create containers from.
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## Object-oriented programming
|
||||
## Comparison with object-oriented programming
|
||||
|
||||
* Images are conceptually similar to *classes*.
|
||||
|
||||
@@ -99,7 +107,7 @@ If an image is read-only, how do we change it?
|
||||
* We create a new container from that image.
|
||||
|
||||
* Then we make changes to that container.
|
||||
|
||||
|
||||
* When we are satisfied with those changes, we transform them into a new layer.
|
||||
|
||||
* A new image is created by stacking the new layer on top of the old image.
|
||||
@@ -110,7 +118,7 @@ If an image is read-only, how do we change it?
|
||||
|
||||
* The only way to create an image is by "freezing" a container.
|
||||
|
||||
* The only way to create a container is by instanciating an image.
|
||||
* The only way to create a container is by instantiating an image.
|
||||
|
||||
* Help!
|
||||
|
||||
@@ -118,7 +126,7 @@ If an image is read-only, how do we change it?
|
||||
|
||||
## Creating the first images
|
||||
|
||||
There is a special empty image called `scratch`.
|
||||
There is a special empty image called `scratch`.
|
||||
|
||||
* It allows to *build from scratch*.
|
||||
|
||||
@@ -138,7 +146,7 @@ Note: you will probably never have to do this yourself.
|
||||
* Saves all the changes made to a container into a new layer.
|
||||
* Creates a new image (effectively a copy of the container).
|
||||
|
||||
`docker build`
|
||||
`docker build` **(used 99% of the time)**
|
||||
|
||||
* Performs a repeatable build sequence.
|
||||
* This is the preferred method!
|
||||
@@ -180,6 +188,8 @@ Those images include:
|
||||
|
||||
* Ready-to-use components and services, like redis, postgresql...
|
||||
|
||||
* Over 130 at this point!
|
||||
|
||||
---
|
||||
|
||||
## User namespace
|
||||
@@ -206,7 +216,7 @@ clock
|
||||
|
||||
---
|
||||
|
||||
## Self-Hosted namespace
|
||||
## Self-hosted namespace
|
||||
|
||||
This namespace holds images which are not hosted on Docker Hub, but on third
|
||||
party registries.
|
||||
@@ -223,6 +233,13 @@ localhost:5000/wordpress
|
||||
* `localhost:5000` is the host and port of the registry
|
||||
* `wordpress` is the name of the image
|
||||
|
||||
Other examples:
|
||||
|
||||
```bash
|
||||
quay.io/coreos/etcd
|
||||
gcr.io/google-containers/hugo
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## How do you store and manage images?
|
||||
@@ -299,9 +316,9 @@ There are two ways to download images.
|
||||
```bash
|
||||
$ docker pull debian:jessie
|
||||
Pulling repository debian
|
||||
b164861940b8: Download complete
|
||||
b164861940b8: Pulling image (jessie) from debian
|
||||
d1881793a057: Download complete
|
||||
b164861940b8: Download complete
|
||||
b164861940b8: Pulling image (jessie) from debian
|
||||
d1881793a057: Download complete
|
||||
```
|
||||
|
||||
* As seen previously, images are made up of layers.
|
||||
@@ -342,6 +359,8 @@ Do specify tags:
|
||||
* To ensure that the same version will be used everywhere.
|
||||
* To ensure repeatability later.
|
||||
|
||||
This is similar to what we would do with `pip install`, `npm install`, etc.
|
||||
|
||||
---
|
||||
|
||||
## Section summary
|
||||
@@ -1,3 +1,4 @@
|
||||
|
||||
class: title
|
||||
|
||||
# Installing Docker
|
||||
@@ -29,7 +30,7 @@ We can arbitrarily distinguish:
|
||||
|
||||
* Installing Docker on an existing Linux machine (physical or VM)
|
||||
|
||||
* Installing Docker on MacOS or Windows
|
||||
* Installing Docker on macOS or Windows
|
||||
|
||||
* Installing Docker on a fleet of cloud VMs
|
||||
|
||||
@@ -37,7 +38,9 @@ We can arbitrarily distinguish:
|
||||
|
||||
## Installing Docker on Linux
|
||||
|
||||
* The recommended method is to install the packages supplied by Docker Inc.
|
||||
* The recommended method is to install the packages supplied by Docker Inc.:
|
||||
|
||||
https://store.docker.com
|
||||
|
||||
* The general method is:
|
||||
|
||||
@@ -55,13 +58,35 @@ We can arbitrarily distinguish:
|
||||
|
||||
---
|
||||
|
||||
## Installing Docker on MacOS and Windows
|
||||
class: extra-details
|
||||
|
||||
* On MacOS, the recommended method is to use Docker4Mac:
|
||||
## Docker Inc. packages vs distribution packages
|
||||
|
||||
* Docker Inc. releases new versions monthly (edge) and quarterly (stable)
|
||||
|
||||
* Releases are immediately available on Docker Inc.'s package repositories
|
||||
|
||||
* Linux distros don't always update to the latest Docker version
|
||||
|
||||
(Sometimes, updating would break their guidelines for major/minor upgrades)
|
||||
|
||||
* Sometimes, some distros have carried packages with custom patches
|
||||
|
||||
* Sometimes, these patches added critical security bugs ☹
|
||||
|
||||
* Installing through Docker Inc.'s repositories is a bit of extra work …
|
||||
|
||||
… but it is generally worth it!
|
||||
|
||||
---
|
||||
|
||||
## Installing Docker on macOS and Windows
|
||||
|
||||
* On macOS, the recommended method is to use Docker Desktop for Mac:
|
||||
|
||||
https://docs.docker.com/docker-for-mac/install/
|
||||
|
||||
* On Windows 10 Pro, Enterprise, and Eduction, you can use Docker4Windows:
|
||||
* On Windows 10 Pro, Enterprise, and Education, you can use Docker Desktop for Windows:
|
||||
|
||||
https://docs.docker.com/docker-for-windows/install/
|
||||
|
||||
@@ -69,9 +94,36 @@ We can arbitrarily distinguish:
|
||||
|
||||
https://docs.docker.com/toolbox/toolbox_install_windows/
|
||||
|
||||
* On Windows Server 2016, you can also install the native engine:
|
||||
|
||||
https://docs.docker.com/install/windows/docker-ee/
|
||||
|
||||
---
|
||||
|
||||
## Running Docker on MacOS and Windows
|
||||
## Docker Desktop for Mac and Docker Desktop for Windows
|
||||
|
||||
* Special Docker Editions that integrate well with their respective host OS
|
||||
|
||||
* Provide user-friendly GUI to edit Docker configuration and settings
|
||||
|
||||
* Leverage the host OS virtualization subsystem (e.g. the [Hypervisor API](https://developer.apple.com/documentation/hypervisor) on macOS)
|
||||
|
||||
* Installed like normal user applications on the host
|
||||
|
||||
* Under the hood, they both run a tiny VM (transparent to our daily use)
|
||||
|
||||
* Access network resources like normal applications
|
||||
<br/>(and therefore, play better with enterprise VPNs and firewalls)
|
||||
|
||||
* Support filesystem sharing through volumes (we'll talk about this later)
|
||||
|
||||
* They only support running one Docker VM at a time ...
|
||||
<br/>
|
||||
... but we can use `docker-machine`, the Docker Toolbox, VirtualBox, etc. to get a cluster.
|
||||
|
||||
---
|
||||
|
||||
## Running Docker on macOS and Windows
|
||||
|
||||
When you execute `docker version` from the terminal:
|
||||
|
||||
@@ -88,25 +140,6 @@ This will also allow to use remote Engines exactly as if they were local.
|
||||
|
||||
---
|
||||
|
||||
## Docker4Mac and Docker4Windows
|
||||
|
||||
* They let you run Docker without VirtualBox
|
||||
|
||||
* They are installed like normal applications (think QEMU, but faster)
|
||||
|
||||
* They access network resources like normal applications
|
||||
<br/>(and therefore, play well with enterprise VPNs and firewalls)
|
||||
|
||||
* They support filesystem sharing through volumes (we'll talk about this later)
|
||||
|
||||
* They only support running one Docker VM at a time ...
|
||||
|
||||
... so if you want to run a full cluster locally, install e.g. the Docker Toolbox
|
||||
|
||||
* They can co-exist with the Docker Toolbox
|
||||
|
||||
---
|
||||
|
||||
## Important PSA about security
|
||||
|
||||
* If you have access to the Docker control socket, you can take over the machine
|
||||
82
slides/containers/Labels.md
Normal file
82
slides/containers/Labels.md
Normal file
@@ -0,0 +1,82 @@
|
||||
# Labels
|
||||
|
||||
* Labels allow us to attach arbitrary metadata to containers.
|
||||
|
||||
* Labels are key/value pairs.
|
||||
|
||||
* They are specified at container creation.
|
||||
|
||||
* You can query them with `docker inspect`.
|
||||
|
||||
* They can also be used as filters with some commands (e.g. `docker ps`).
|
||||
|
||||
---
|
||||
|
||||
## Using labels
|
||||
|
||||
Let's create a few containers with a label `owner`.
|
||||
|
||||
```bash
|
||||
docker run -d -l owner=alice nginx
|
||||
docker run -d -l owner=bob nginx
|
||||
docker run -d -l owner nginx
|
||||
```
|
||||
|
||||
We didn't specify a value for the `owner` label in the last example.
|
||||
|
||||
This is equivalent to setting the value to be an empty string.
|
||||
|
||||
---
|
||||
|
||||
## Querying labels
|
||||
|
||||
We can view the labels with `docker inspect`.
|
||||
|
||||
```bash
|
||||
$ docker inspect $(docker ps -lq) | grep -A3 Labels
|
||||
"Labels": {
|
||||
"maintainer": "NGINX Docker Maintainers <docker-maint@nginx.com>",
|
||||
"owner": ""
|
||||
},
|
||||
```
|
||||
|
||||
We can use the `--format` flag to list the value of a label.
|
||||
|
||||
```bash
|
||||
$ docker inspect $(docker ps -q) --format 'OWNER={{.Config.Labels.owner}}'
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Using labels to select containers
|
||||
|
||||
We can list containers having a specific label.
|
||||
|
||||
```bash
|
||||
$ docker ps --filter label=owner
|
||||
```
|
||||
|
||||
Or we can list containers having a specific label with a specific value.
|
||||
|
||||
```bash
|
||||
$ docker ps --filter label=owner=alice
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Use-cases for labels
|
||||
|
||||
|
||||
* HTTP vhost of a web app or web service.
|
||||
|
||||
(The label is used to generate the configuration for NGINX, HAProxy, etc.)
|
||||
|
||||
* Backup schedule for a stateful service.
|
||||
|
||||
(The label is used by a cron job to determine if/when to backup container data.)
|
||||
|
||||
* Service ownership.
|
||||
|
||||
(To determine internal cross-billing, or who to page in case of outage.)
|
||||
|
||||
* etc.
|
||||
@@ -17,7 +17,7 @@ At the end of this section, you will be able to:
|
||||
|
||||
---
|
||||
|
||||
## Containerized local development environments
|
||||
## Local development in a container
|
||||
|
||||
We want to solve the following issues:
|
||||
|
||||
@@ -69,7 +69,6 @@ Aha, a `Gemfile`! This is Ruby. Probably. We know this. Maybe?
|
||||
|
||||
```dockerfile
|
||||
FROM ruby
|
||||
MAINTAINER Education Team at Docker <education@docker.com>
|
||||
|
||||
COPY . /src
|
||||
WORKDIR /src
|
||||
@@ -177,7 +176,9 @@ $ docker run -d -v $(pwd):/src -P namer
|
||||
|
||||
* `namer` is the name of the image we will run.
|
||||
|
||||
* We don't specify a command to run because is is already set in the Dockerfile.
|
||||
* We don't specify a command to run because it is already set in the Dockerfile.
|
||||
|
||||
Note: on Windows, replace `$(pwd)` with `%cd%` (or `${pwd}` if you use PowerShell).
|
||||
|
||||
---
|
||||
|
||||
@@ -308,54 +309,6 @@ and *canary deployments*.
|
||||
|
||||
---
|
||||
|
||||
## Improving the workflow
|
||||
|
||||
The workflow that we showed is nice, but it requires us to:
|
||||
|
||||
* keep track of all the `docker run` flags required to run the container,
|
||||
|
||||
* inspect the `Dockerfile` to know which path(s) to mount,
|
||||
|
||||
* write scripts to hide that complexity.
|
||||
|
||||
There has to be a better way!
|
||||
|
||||
---
|
||||
|
||||
## Docker Compose to the rescue
|
||||
|
||||
* Docker Compose allows us to "encode" `docker run` parameters in a YAML file.
|
||||
|
||||
* Here is the `docker-compose.yml` file that we can use for our "namer" app:
|
||||
|
||||
```yaml
|
||||
www:
|
||||
build: .
|
||||
volumes:
|
||||
- .:/src
|
||||
ports:
|
||||
- 80:9292
|
||||
```
|
||||
|
||||
* Try it:
|
||||
```bash
|
||||
$ docker-compose up -d
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Working with Docker Compose
|
||||
|
||||
* When you see a `docker-compose.yml` file, you can use `docker-compose up`.
|
||||
|
||||
* It can build images and run them with the required parameters.
|
||||
|
||||
* Compose can also deal with complex, multi-container apps.
|
||||
|
||||
(More on this later!)
|
||||
|
||||
---
|
||||
|
||||
## Recap of the development workflow
|
||||
|
||||
1. Write a Dockerfile to build an image containing our development environment.
|
||||
298
slides/containers/Logging.md
Normal file
298
slides/containers/Logging.md
Normal file
@@ -0,0 +1,298 @@
|
||||
# Logging
|
||||
|
||||
In this chapter, we will explain the different ways to send logs from containers.
|
||||
|
||||
We will then show one particular method in action, using ELK and Docker's logging drivers.
|
||||
|
||||
---
|
||||
|
||||
## There are many ways to send logs
|
||||
|
||||
- The simplest method is to write on the standard output and error.
|
||||
|
||||
- Applications can write their logs to local files.
|
||||
|
||||
(The files are usually periodically rotated and compressed.)
|
||||
|
||||
- It is also very common (on UNIX systems) to use syslog.
|
||||
|
||||
(The logs are collected by syslogd or an equivalent like journald.)
|
||||
|
||||
- In large applications with many components, it is common to use a logging service.
|
||||
|
||||
(The code uses a library to send messages to the logging service.)
|
||||
|
||||
*All these methods are available with containers.*
|
||||
|
||||
---
|
||||
|
||||
## Writing on stdout/stderr
|
||||
|
||||
- The standard output and error of containers is managed by the container engine.
|
||||
|
||||
- This means that each line written by the container is received by the engine.
|
||||
|
||||
- The engine can then do "whatever" with these log lines.
|
||||
|
||||
- With Docker, the default configuration is to write the logs to local files.
|
||||
|
||||
- The files can then be queried with e.g. `docker logs` (and the equivalent API request).
|
||||
|
||||
- This can be customized, as we will see later.
|
||||
|
||||
---
|
||||
|
||||
## Writing to local files
|
||||
|
||||
- If we write to files, it is possible to access them but cumbersome.
|
||||
|
||||
(We have to use `docker exec` or `docker cp`.)
|
||||
|
||||
- Furthermore, if the container is stopped, we cannot use `docker exec`.
|
||||
|
||||
- If the container is deleted, the logs disappear.
|
||||
|
||||
- What should we do for programs that can only log to local files?
|
||||
|
||||
--
|
||||
|
||||
- There are multiple solutions.
|
||||
|
||||
---
|
||||
|
||||
## Using a volume or bind mount
|
||||
|
||||
- Instead of writing logs to a normal directory, we can place them on a volume.
|
||||
|
||||
- The volume can be accessed by other containers.
|
||||
|
||||
- We can run a program like `filebeat` in another container accessing the same volume.
|
||||
|
||||
(`filebeat` reads local log files continuously, like `tail -f`, and sends them
|
||||
to a centralized system like ElasticSearch.)
|
||||
|
||||
- We can also use a bind mount, e.g. `-v /var/log/containers/www:/var/log/tomcat`.
|
||||
|
||||
- The container will write log files to a directory mapped to a host directory.
|
||||
|
||||
- The log files will appear on the host and be consumable directly from the host.
|
||||
|
||||
---
|
||||
|
||||
## Using logging services
|
||||
|
||||
- We can use logging frameworks (like log4j or the Python `logging` package).
|
||||
|
||||
- These frameworks require some code and/or configuration in our application code.
|
||||
|
||||
- These mechanisms can be used identically inside or outside of containers.
|
||||
|
||||
- Sometimes, we can leverage containerized networking to simplify their setup.
|
||||
|
||||
- For instance, our code can send log messages to a server named `log`.
|
||||
|
||||
- The name `log` will resolve to different addresses in development, production, etc.
|
||||
|
||||
---
|
||||
|
||||
## Using syslog
|
||||
|
||||
- What if our code (or the program we are running in containers) uses syslog?
|
||||
|
||||
- One possibility is to run a syslog daemon in the container.
|
||||
|
||||
- Then that daemon can be setup to write to local files or forward to the network.
|
||||
|
||||
- Under the hood, syslog clients connect to a local UNIX socket, `/dev/log`.
|
||||
|
||||
- We can expose a syslog socket to the container (by using a volume or bind-mount).
|
||||
|
||||
- Then just create a symlink from `/dev/log` to the syslog socket.
|
||||
|
||||
- Voilà!
|
||||
|
||||
---
|
||||
|
||||
## Using logging drivers
|
||||
|
||||
- If we log to stdout and stderr, the container engine receives the log messages.
|
||||
|
||||
- The Docker Engine has a modular logging system with many plugins, including:
|
||||
|
||||
- json-file (the default one)
|
||||
- syslog
|
||||
- journald
|
||||
- gelf
|
||||
- fluentd
|
||||
- splunk
|
||||
- etc.
|
||||
|
||||
- Each plugin can process and forward the logs to another process or system.
|
||||
|
||||
---
|
||||
|
||||
## A word of warning about `json-file`
|
||||
|
||||
- By default, log file size is unlimited.
|
||||
|
||||
- This means that a very verbose container *will* use up all your disk space.
|
||||
|
||||
(Or a less verbose container, but running for a very long time.)
|
||||
|
||||
- Log rotation can be enabled by setting a `max-size` option.
|
||||
|
||||
- Older log files can be removed by setting a `max-file` option.
|
||||
|
||||
- Just like other logging options, these can be set per container, or globally.
|
||||
|
||||
Example:
|
||||
```bash
|
||||
$ docker run --log-opt max-size=10m --log-opt max-file=3 elasticsearch
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Demo: sending logs to ELK
|
||||
|
||||
- We are going to deploy an ELK stack.
|
||||
|
||||
- It will accept logs over a GELF socket.
|
||||
|
||||
- We will run a few containers with the `gelf` logging driver.
|
||||
|
||||
- We will then see our logs in Kibana, the web interface provided by ELK.
|
||||
|
||||
*Important foreword: this is not an "official" or "recommended"
|
||||
setup; it is just an example. We used ELK in this demo because
|
||||
it's a popular setup and we keep being asked about it; but you
|
||||
will have equal success with Fluent or other logging stacks!*
|
||||
|
||||
---
|
||||
|
||||
## What's in an ELK stack?
|
||||
|
||||
- ELK is three components:
|
||||
|
||||
- ElasticSearch (to store and index log entries)
|
||||
|
||||
- Logstash (to receive log entries from various
|
||||
sources, process them, and forward them to various
|
||||
destinations)
|
||||
|
||||
- Kibana (to view/search log entries with a nice UI)
|
||||
|
||||
- The only component that we will configure is Logstash
|
||||
|
||||
- We will accept log entries using the GELF protocol
|
||||
|
||||
- Log entries will be stored in ElasticSearch,
|
||||
<br/>and displayed on Logstash's stdout for debugging
|
||||
|
||||
---
|
||||
|
||||
## Running ELK
|
||||
|
||||
- We are going to use a Compose file describing the ELK stack.
|
||||
|
||||
- The Compose file is in the container.training repository on GitHub.
|
||||
|
||||
```bash
|
||||
$ git clone https://github.com/jpetazzo/container.training
|
||||
$ cd container.training
|
||||
$ cd elk
|
||||
$ docker-compose up
|
||||
```
|
||||
|
||||
- Let's have a look at the Compose file while it's deploying.
|
||||
|
||||
---
|
||||
|
||||
## Our basic ELK deployment
|
||||
|
||||
- We are using images from the Docker Hub: `elasticsearch`, `logstash`, `kibana`.
|
||||
|
||||
- We don't need to change the configuration of ElasticSearch.
|
||||
|
||||
- We need to tell Kibana the address of ElasticSearch:
|
||||
|
||||
- it is set with the `ELASTICSEARCH_URL` environment variable,
|
||||
|
||||
- by default it is `localhost:9200`, we change it to `elasticsearch:9200`.
|
||||
|
||||
- We need to configure Logstash:
|
||||
|
||||
- we pass the entire configuration file through command-line arguments,
|
||||
|
||||
- this is a hack so that we don't have to create an image just for the config.
|
||||
|
||||
---
|
||||
|
||||
## Sending logs to ELK
|
||||
|
||||
- The ELK stack accepts log messages through a GELF socket.
|
||||
|
||||
- The GELF socket listens on UDP port 12201.
|
||||
|
||||
- To send a message, we need to change the logging driver used by Docker.
|
||||
|
||||
- This can be done globally (by reconfiguring the Engine) or on a per-container basis.
|
||||
|
||||
- Let's override the logging driver for a single container:
|
||||
|
||||
```bash
|
||||
$ docker run --log-driver=gelf --log-opt=gelf-address=udp://localhost:12201 \
|
||||
alpine echo hello world
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Viewing the logs in ELK
|
||||
|
||||
- Connect to the Kibana interface.
|
||||
|
||||
- It is exposed on port 5601.
|
||||
|
||||
- Browse http://X.X.X.X:5601.
|
||||
|
||||
---
|
||||
|
||||
## "Configuring" Kibana
|
||||
|
||||
- Kibana should offer you to "Configure an index pattern":
|
||||
<br/>in the "Time-field name" drop down, select "@timestamp", and hit the
|
||||
"Create" button.
|
||||
|
||||
- Then:
|
||||
|
||||
- click "Discover" (in the top-left corner),
|
||||
- click "Last 15 minutes" (in the top-right corner),
|
||||
- click "Last 1 hour" (in the list in the middle),
|
||||
- click "Auto-refresh" (top-right corner),
|
||||
- click "5 seconds" (top-left of the list).
|
||||
|
||||
- You should see a series of green bars (with one new green bar every minute).
|
||||
|
||||
- Our 'hello world' message should be visible there.
|
||||
|
||||
---
|
||||
|
||||
## Important afterword
|
||||
|
||||
**This is not a "production-grade" setup.**
|
||||
|
||||
It is just an educational example. Since we have only
|
||||
one node, we set up a single
|
||||
ElasticSearch instance and a single Logstash instance.
|
||||
|
||||
In a production setup, you need an ElasticSearch cluster
|
||||
(both for capacity and availability reasons). You also
|
||||
need multiple Logstash instances.
|
||||
|
||||
And if you want to withstand
|
||||
bursts of logs, you need some kind of message queue:
|
||||
Redis if you're cheap, Kafka if you want to make sure
|
||||
that you don't drop messages on the floor. Good luck.
|
||||
|
||||
If you want to learn more about the GELF driver,
|
||||
have a look at [this blog post](
|
||||
https://jpetazzo.github.io/2017/01/20/docker-logging-gelf/).
|
||||
315
slides/containers/Multi_Stage_Builds.md
Normal file
315
slides/containers/Multi_Stage_Builds.md
Normal file
@@ -0,0 +1,315 @@
|
||||
# Reducing image size
|
||||
|
||||
* In the previous example, our final image contained:
|
||||
|
||||
* our `hello` program
|
||||
|
||||
* its source code
|
||||
|
||||
* the compiler
|
||||
|
||||
* Only the first one is strictly necessary.
|
||||
|
||||
* We are going to see how to obtain an image without the superfluous components.
|
||||
|
||||
---
|
||||
|
||||
## Can't we remove superfluous files with `RUN`?
|
||||
|
||||
What happens if we do one of the following commands?
|
||||
|
||||
- `RUN rm -rf ...`
|
||||
|
||||
- `RUN apt-get remove ...`
|
||||
|
||||
- `RUN make clean ...`
|
||||
|
||||
--
|
||||
|
||||
This adds a layer which removes a bunch of files.
|
||||
|
||||
But the previous layers (which added the files) still exist.
|
||||
|
||||
---
|
||||
|
||||
## Removing files with an extra layer
|
||||
|
||||
When downloading an image, all the layers must be downloaded.
|
||||
|
||||
| Dockerfile instruction | Layer size | Image size |
|
||||
| ---------------------- | ---------- | ---------- |
|
||||
| `FROM ubuntu` | Size of base image | Size of base image |
|
||||
| `...` | ... | Sum of this layer <br/>+ all previous ones |
|
||||
| `RUN apt-get install somepackage` | Size of files added <br/>(e.g. a few MB) | Sum of this layer <br/>+ all previous ones |
|
||||
| `...` | ... | Sum of this layer <br/>+ all previous ones |
|
||||
| `RUN apt-get remove somepackage` | Almost zero <br/>(just metadata) | Same as previous one |
|
||||
|
||||
Therefore, `RUN rm` does not reduce the size of the image or free up disk space.
|
||||
|
||||
---
|
||||
|
||||
## Removing unnecessary files
|
||||
|
||||
Various techniques are available to obtain smaller images:
|
||||
|
||||
- collapsing layers,
|
||||
|
||||
- adding binaries that are built outside of the Dockerfile,
|
||||
|
||||
- squashing the final image,
|
||||
|
||||
- multi-stage builds.
|
||||
|
||||
Let's review them quickly.
|
||||
|
||||
---
|
||||
|
||||
## Collapsing layers
|
||||
|
||||
You will frequently see Dockerfiles like this:
|
||||
|
||||
```dockerfile
|
||||
FROM ubuntu
|
||||
RUN apt-get update && apt-get install xxx && ... && apt-get remove xxx && ...
|
||||
```
|
||||
|
||||
Or the (more readable) variant:
|
||||
|
||||
```dockerfile
|
||||
FROM ubuntu
|
||||
RUN apt-get update \
|
||||
&& apt-get install xxx \
|
||||
&& ... \
|
||||
&& apt-get remove xxx \
|
||||
&& ...
|
||||
```
|
||||
|
||||
This `RUN` command gives us a single layer.
|
||||
|
||||
The files that are added, then removed in the same layer, do not grow the layer size.
|
||||
|
||||
---
|
||||
|
||||
## Collapsing layers: pros and cons
|
||||
|
||||
Pros:
|
||||
|
||||
- works on all versions of Docker
|
||||
|
||||
- doesn't require extra tools
|
||||
|
||||
Cons:
|
||||
|
||||
- not very readable
|
||||
|
||||
- some unnecessary files might still remain if the cleanup is not thorough
|
||||
|
||||
- that layer is expensive (slow to build)
|
||||
|
||||
---
|
||||
|
||||
## Building binaries outside of the Dockerfile
|
||||
|
||||
This results in a Dockerfile looking like this:
|
||||
|
||||
```dockerfile
|
||||
FROM ubuntu
|
||||
COPY xxx /usr/local/bin
|
||||
```
|
||||
|
||||
Of course, this implies that the file `xxx` exists in the build context.
|
||||
|
||||
That file has to exist before you can run `docker build`.
|
||||
|
||||
For instance, it can:
|
||||
|
||||
- exist in the code repository,
|
||||
- be created by another tool (script, Makefile...),
|
||||
- be created by another container image and extracted from the image.
|
||||
|
||||
See for instance the [busybox official image](https://github.com/docker-library/busybox/blob/fe634680e32659aaf0ee0594805f74f332619a90/musl/Dockerfile) or this [older busybox image](https://github.com/jpetazzo/docker-busybox).
|
||||
|
||||
---
|
||||
|
||||
## Building binaries outside: pros and cons
|
||||
|
||||
Pros:
|
||||
|
||||
- final image can be very small
|
||||
|
||||
Cons:
|
||||
|
||||
- requires an extra build tool
|
||||
|
||||
- we're back in dependency hell and "works on my machine"
|
||||
|
||||
Cons, if binary is added to code repository:
|
||||
|
||||
- breaks portability across different platforms
|
||||
|
||||
- grows repository size a lot if the binary is updated frequently
|
||||
|
||||
---
|
||||
|
||||
## Squashing the final image
|
||||
|
||||
The idea is to transform the final image into a single-layer image.
|
||||
|
||||
This can be done in (at least) two ways.
|
||||
|
||||
- Activate experimental features and squash the final image:
|
||||
```bash
|
||||
docker image build --squash ...
|
||||
```
|
||||
|
||||
- Export/import the final image.
|
||||
```bash
|
||||
docker build -t temp-image .
|
||||
docker run --entrypoint true --name temp-container temp-image
|
||||
docker export temp-container | docker import - final-image
|
||||
docker rm temp-container
|
||||
docker rmi temp-image
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Squashing the image: pros and cons
|
||||
|
||||
Pros:
|
||||
|
||||
- single-layer images are smaller and faster to download
|
||||
|
||||
- removed files no longer take up storage and network resources
|
||||
|
||||
Cons:
|
||||
|
||||
- we still need to actively remove unnecessary files
|
||||
|
||||
- squash operation can take a lot of time (on big images)
|
||||
|
||||
- squash operation does not benefit from cache
|
||||
<br/>
|
||||
(even if we change just a tiny file, the whole image needs to be re-squashed)
|
||||
|
||||
---
|
||||
|
||||
## Multi-stage builds
|
||||
|
||||
Multi-stage builds allow us to have multiple *stages*.
|
||||
|
||||
Each stage is a separate image, and can copy files from previous stages.
|
||||
|
||||
We're going to see how they work in more detail.
|
||||
|
||||
---
|
||||
|
||||
# Multi-stage builds
|
||||
|
||||
* At any point in our `Dockerfile`, we can add a new `FROM` line.
|
||||
|
||||
* This line starts a new stage of our build.
|
||||
|
||||
* Each stage can access the files of the previous stages with `COPY --from=...`.
|
||||
|
||||
* When a build is tagged (with `docker build -t ...`), the last stage is tagged.
|
||||
|
||||
* Previous stages are not discarded: they will be used for caching, and can be referenced.
|
||||
|
||||
---
|
||||
|
||||
## Multi-stage builds in practice
|
||||
|
||||
* Each stage is numbered, starting at `0`
|
||||
|
||||
* We can copy a file from a previous stage by indicating its number, e.g.:
|
||||
|
||||
```dockerfile
|
||||
COPY --from=0 /file/from/first/stage /location/in/current/stage
|
||||
```
|
||||
|
||||
* We can also name stages, and reference these names:
|
||||
|
||||
```dockerfile
|
||||
FROM golang AS builder
|
||||
RUN ...
|
||||
FROM alpine
|
||||
COPY --from=builder /go/bin/mylittlebinary /usr/local/bin/
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Multi-stage builds for our C program
|
||||
|
||||
We will change our Dockerfile to:
|
||||
|
||||
* give a nickname to the first stage: `compiler`
|
||||
|
||||
* add a second stage using the same `ubuntu` base image
|
||||
|
||||
* add the `hello` binary to the second stage
|
||||
|
||||
* make sure that `CMD` is in the second stage
|
||||
|
||||
The resulting Dockerfile is on the next slide.
|
||||
|
||||
---
|
||||
|
||||
## Multi-stage build `Dockerfile`
|
||||
|
||||
Here is the final Dockerfile:
|
||||
|
||||
```dockerfile
|
||||
FROM ubuntu AS compiler
|
||||
RUN apt-get update
|
||||
RUN apt-get install -y build-essential
|
||||
COPY hello.c /
|
||||
RUN make hello
|
||||
FROM ubuntu
|
||||
COPY --from=compiler /hello /hello
|
||||
CMD /hello
|
||||
```
|
||||
|
||||
Let's build it, and check that it works correctly:
|
||||
|
||||
```bash
|
||||
docker build -t hellomultistage .
|
||||
docker run hellomultistage
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Comparing single/multi-stage build image sizes
|
||||
|
||||
List our images with `docker images`, and check the size of:
|
||||
|
||||
- the `ubuntu` base image,
|
||||
|
||||
- the single-stage `hello` image,
|
||||
|
||||
- the multi-stage `hellomultistage` image.
|
||||
|
||||
We can achieve even smaller images if we use smaller base images.
|
||||
|
||||
However, if we use common base images (e.g. if we standardize on `ubuntu`),
|
||||
these common images will be pulled only once per node, so they are
|
||||
virtually "free."
|
||||
|
||||
---
|
||||
|
||||
## Build targets
|
||||
|
||||
* We can also tag an intermediary stage with `docker build --target STAGE --tag NAME`
|
||||
|
||||
* This will create an image (named `NAME`) corresponding to stage `STAGE`
|
||||
|
||||
* This can be used to easily access an intermediary stage for inspection
|
||||
|
||||
(Instead of parsing the output of `docker build` to find out the image ID)
|
||||
|
||||
* This can also be used to describe multiple images from a single Dockerfile
|
||||
|
||||
(Instead of using multiple Dockerfiles, which could go out of sync)
|
||||
|
||||
* Sometimes, we want to inspect a specific intermediary build stage.
|
||||
|
||||
* Or, we want to describe multiple images using a single Dockerfile.
|
||||
1124
slides/containers/Namespaces_Cgroups.md
Normal file
1124
slides/containers/Namespaces_Cgroups.md
Normal file
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user