Mirror of https://github.com/jpetazzo/container.training.git
(synced 2026-02-15 10:09:56 +00:00)

Compare commits (699 commits)
(The commit table did not survive extraction: only the abbreviated SHA-1 hashes of the 699 commits remain; the author, date, and message columns are empty. The changed files are listed below.)
.gitignore (vendored) | 5 changed lines

@@ -7,4 +7,7 @@ prepare-vms/ips.pdf
prepare-vms/settings.yaml
prepare-vms/tags
slides/*.yml.html
autotest/nextstep
slides/autopilot/state.yaml
slides/index.html
slides/past.html
node_modules
CHECKLIST.md (new file) | 24 lines

@@ -0,0 +1,24 @@
Checklist to use when delivering a workshop
Authored by Jérôme; additions by Bridget

- [ ] Create event-named branch (such as `conferenceYYYY`) in the [main repo](https://github.com/jpetazzo/container.training/)
- [ ] Create file `slides/_redirects` containing a link to the desired tutorial: `/ /kube-halfday.yml.html 200`
- [ ] Push local branch to GitHub and merge into main repo
- [ ] [Netlify setup](https://app.netlify.com/sites/container-training/settings/domain): create subdomain for event-named branch
- [ ] Add link to event-named branch to [container.training front page](https://github.com/jpetazzo/container.training/blob/master/slides/index.html)
- [ ] Update the slide that says which versions we are using for [kube](https://github.com/jpetazzo/container.training/blob/master/slides/kube/versions-k8s.md) or [swarm](https://github.com/jpetazzo/container.training/blob/master/slides/swarm/versions.md) workshops
- [ ] Update the version of Compose and Machine in [settings](https://github.com/jpetazzo/container.training/tree/master/prepare-vms/settings)
- [ ] (optional) Create chatroom
- [ ] (optional) Set chatroom in YML ([kube half-day example](https://github.com/jpetazzo/container.training/blob/master/slides/kube-halfday.yml#L6-L8)) and deploy
- [ ] (optional) Put chat link on [container.training front page](https://github.com/jpetazzo/container.training/blob/master/slides/index.html)
- [ ] How many VMs do we need? Check with event organizers ahead of time
- [ ] Provision VMs (slightly more than we think we'll need)
- [ ] Change password on presenter's VMs (to forestall any hijinx)
- [ ] Onsite: walk the room to count seats, check power supplies, lectern, A/V setup
- [ ] Print cards
- [ ] Cut cards
- [ ] Last-minute merge from master
- [ ] Check that all looks good
- [ ] DELIVER!
- [ ] Shut down VMs
- [ ] Update index.html to remove chat link and move session to past things
LICENSE | 19 changed lines

@@ -1,13 +1,12 @@
Copyright 2015 Jérôme Petazzoni

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
The code in this repository is licensed under the Apache License
Version 2.0. You may obtain a copy of this license at:

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The instructions and slides in this repository (e.g. the files
with extension .md and .yml in the "slides" subdirectory) are
under the Creative Commons Attribution 4.0 International Public
License. You may obtain a copy of this license at:

https://creativecommons.org/licenses/by/4.0/legalcode
README.md | 52 changed lines

@@ -39,14 +39,16 @@ your own tutorials.
All these materials have been gathered in a single repository
because they have a few things in common:

- a [build system](slides/) generating HTML slides from
  Markdown source files;
- some [common slides](slides/common/) that are re-used
  (and updated) identically between different decks;
- a [build system](slides/) generating HTML slides from
  Markdown source files;
- a [semi-automated test harness](slides/autopilot/) to check
  that the exercises and examples provided work properly;
- a [PhantomJS script](slides/slidechecker.js) to check
  that the slides look good and don't have formatting issues;
- [deployment scripts](prepare-vms/) to start training
  VMs in bulk;
- a [semi-automated test harness](autotest/) to check
  that the exercises and examples provided work properly;
- a fancy pipeline powered by
  [Netlify](https://www.netlify.com/) and continuously
  deploying `master` to http://container.training/.

@@ -74,9 +76,6 @@ a few other contributors. It is actively maintained.

## Repository structure

- [autotest](autotest/)
  - Semi-automated testing system to check that all the exercises
    in the slides work properly.
- [bin](bin/)
  - A few helper scripts that you can safely ignore for now.
- [dockercoins](dockercoins/)

@@ -248,6 +247,17 @@ content but you also know to skip during presentation.
- Last 15-30 minutes is for stateful services, DAB files, and questions.

### Pre-built images

There are pre-built images for the 4 components of the DockerCoins demo app: `dockercoins/hasher:v0.1`, `dockercoins/rng:v0.1`, `dockercoins/webui:v0.1`, and `dockercoins/worker:v0.1`. They correspond to the code in this repository.

There are also three variants, for demo purposes:

- `dockercoins/rng:v0.2` is broken (the server won't even start),
- `dockercoins/webui:v0.2` has a bigger font on the Y axis and a green graph (instead of blue),
- `dockercoins/worker:v0.2` is 11x slower than `v0.1`.
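A minimal Compose sketch showing how these images fit together (the Redis dependency and the host port mappings follow the usual DockerCoins setup, but they are assumptions here rather than part of this diff):

```yaml
version: "2"
services:
  redis:                            # the worker stores coins in Redis
    image: redis
  rng:
    image: dockercoins/rng:v0.1     # swap in :v0.2 to demo the broken server
    ports: ["8001:80"]
  hasher:
    image: dockercoins/hasher:v0.1
    ports: ["8002:80"]
  worker:
    image: dockercoins/worker:v0.1  # swap in :v0.2 to demo the 11x slowdown
  webui:
    image: dockercoins/webui:v0.1
    ports: ["8000:80"]
```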

## Past events

Since its inception, this workshop has been delivered dozens of times,

@@ -282,15 +292,31 @@ If there is a bug and you can't even reproduce it:
sorry. It is probably a Heisenbug. We can't act on it
until it's reproducible, alas.

If you have attended this workshop and have feedback,
or if you want somebody to deliver that workshop at your
conference or for your company: you can contact one of us!

- jerome at docker dot com
# “Please teach us!”

If you have attended one of these workshops, and want
your team or organization to attend a similar one, you
can look at the list of upcoming events on
http://container.training/.

You are also welcome to reuse these materials to run
your own workshop, for your team or even at a meetup
or conference. In that case, you might enjoy watching
[Bridget Kromhout's talk at KubeCon 2018 Europe](https://www.youtube.com/watch?v=mYsp_cGY2O0),
explaining precisely how to run such a workshop yourself.

Finally, you can also contact the following persons,
who are experienced speakers, are familiar with the
material, and are available to deliver these workshops
at your conference or for your company:

- jerome dot petazzoni at gmail dot com
- bret at bretfisher dot com

If you are willing and able to deliver such workshops,
feel free to submit a PR to add your name to that list!
(If you are willing and able to deliver such workshops,
feel free to submit a PR to add your name to that list!)

**Thank you!**
(deleted file; name not captured in this view) | 229 lines removed

@@ -1,229 +0,0 @@
#!/usr/bin/env python

import uuid
import logging
import os
import re
import subprocess
import sys
import time
import uuid

logging.basicConfig(level=logging.DEBUG)


TIMEOUT = 60 # 1 minute


def hrule():
    return "="*int(subprocess.check_output(["tput", "cols"]))

# A "snippet" is something that the user is supposed to do in the workshop.
# Most of the "snippets" are shell commands.
# Some of them can be key strokes or other actions.
# In the markdown source, they are the code sections (identified by triple-
# quotes) within .exercise[] sections.

class Snippet(object):

    def __init__(self, slide, content):
        self.slide = slide
        self.content = content
        self.actions = []

    def __str__(self):
        return self.content


class Slide(object):

    current_slide = 0

    def __init__(self, content):
        Slide.current_slide += 1
        self.number = Slide.current_slide

        # Remove commented-out slides
        # (remark.js considers ??? to be the separator for speaker notes)
        content = re.split("\n\?\?\?\n", content)[0]
        self.content = content

        self.snippets = []
        exercises = re.findall("\.exercise\[(.*)\]", content, re.DOTALL)
        for exercise in exercises:
            if "```" in exercise:
                for snippet in exercise.split("```")[1::2]:
                    self.snippets.append(Snippet(self, snippet))
            else:
                logging.warning("Exercise on slide {} does not have any ``` snippet."
                                .format(self.number))
                self.debug()

    def __str__(self):
        text = self.content
        for snippet in self.snippets:
            text = text.replace(snippet.content, ansi("7")(snippet.content))
        return text

    def debug(self):
        logging.debug("\n{}\n{}\n{}".format(hrule(), self.content, hrule()))


def ansi(code):
    return lambda s: "\x1b[{}m{}\x1b[0m".format(code, s)


def wait_for_string(s):
    logging.debug("Waiting for string: {}".format(s))
    deadline = time.time() + TIMEOUT
    while time.time() < deadline:
        output = capture_pane()
        if s in output:
            return
        time.sleep(1)
    raise Exception("Timed out while waiting for {}!".format(s))


def wait_for_prompt():
    logging.debug("Waiting for prompt.")
    deadline = time.time() + TIMEOUT
    while time.time() < deadline:
        output = capture_pane()
        # If we are not at the bottom of the screen, there will be a bunch of extra \n's
        output = output.rstrip('\n')
        if output[-2:] == "\n$":
            return
        time.sleep(1)
    raise Exception("Timed out while waiting for prompt!")


def check_exit_status():
    token = uuid.uuid4().hex
    data = "echo {} $?\n".format(token)
    logging.debug("Sending {!r} to get exit status.".format(data))
    send_keys(data)
    time.sleep(0.5)
    wait_for_prompt()
    screen = capture_pane()
    status = re.findall("\n{} ([0-9]+)\n".format(token), screen, re.MULTILINE)
    logging.debug("Got exit status: {}.".format(status))
    if len(status) == 0:
        raise Exception("Couldn't retrieve status code {}. Timed out?".format(token))
    if len(status) > 1:
        raise Exception("More than one status code {}. I'm seeing double! Shoot them both.".format(token))
    code = int(status[0])
    if code != 0:
        raise Exception("Non-zero exit status: {}.".format(code))
    # Otherwise just return peacefully.


slides = []
content = open(sys.argv[1]).read()
for slide in re.split("\n---?\n", content):
    slides.append(Slide(slide))

actions = []
for slide in slides:
    for snippet in slide.snippets:
        content = snippet.content
        # Extract the "method" (e.g. bash, keys, ...)
        # On multi-line snippets, the method is alone on the first line
        # On single-line snippets, the data follows the method immediately
        if '\n' in content:
            method, data = content.split('\n', 1)
        else:
            method, data = content.split(' ', 1)
        actions.append((slide, snippet, method, data))


def send_keys(data):
    subprocess.check_call(["tmux", "send-keys", data])

def capture_pane():
    return subprocess.check_output(["tmux", "capture-pane", "-p"])


try:
    i = int(open("nextstep").read())
    logging.info("Loaded next step ({}) from file.".format(i))
except Exception as e:
    logging.warning("Could not read nextstep file ({}), initializing to 0.".format(e))
    i = 0

interactive = True

while i < len(actions):
    with open("nextstep", "w") as f:
        f.write(str(i))
    slide, snippet, method, data = actions[i]

    # Remove extra spaces (we don't want them in the terminal) and carriage returns
    data = data.strip()

    print(hrule())
    print(slide.content.replace(snippet.content, ansi(7)(snippet.content)))
    print(hrule())
    if interactive:
        print("[{}/{}] Shall we execute that snippet above?".format(i, len(actions)))
        print("(ENTER to execute, 'c' to continue until next error, N to jump to step #N)")
        command = raw_input("> ")
    else:
        command = ""

    # For now, remove the `highlighted` sections
    # (Make sure to use $() in shell snippets!)
    if '`' in data:
        logging.info("Stripping ` from snippet.")
        data = data.replace('`', '')

    if command == "c":
        # continue until next timeout
        interactive = False
    elif command.isdigit():
        i = int(command)
    elif command == "":
        logging.info("Running with method {}: {}".format(method, data))
        if method == "keys":
            send_keys(data)
        elif method == "bash":
            # Make sure that we're ready
            wait_for_prompt()
            # Strip leading spaces
            data = re.sub("\n +", "\n", data)
            # Add "RETURN" at the end of the command :)
            data += "\n"
            # Send command
            send_keys(data)
            # Force a short sleep to avoid race condition
            time.sleep(0.5)
            _, _, next_method, next_data = actions[i+1]
            if next_method == "wait":
                wait_for_string(next_data)
            else:
                wait_for_prompt()
                # Verify return code FIXME should be optional
                check_exit_status()
        elif method == "copypaste":
            screen = capture_pane()
            matches = re.findall(data, screen, flags=re.DOTALL)
            if len(matches) == 0:
                raise Exception("Could not find regex {} in output.".format(data))
            # Arbitrarily get the most recent match
            match = matches[-1]
            # Remove line breaks (like a screen copy paste would do)
            match = match.replace('\n', '')
            send_keys(match + '\n')
            # FIXME: we should factor out the "bash" method
            wait_for_prompt()
            check_exit_status()
        else:
            logging.warning("Unknown method {}: {!r}".format(method, data))
        i += 1

    else:
        i += 1
        logging.warning("Unknown command {}, skipping to next step.".format(command))

# Reset slide counter
with open("nextstep", "w") as f:
    f.write(str(0))
(file name not captured in this view)

@@ -28,5 +28,5 @@ def rng(how_many_bytes):


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=80)
    app.run(host="0.0.0.0", port=80, threaded=False)
k8s/consul.yaml (new file) | 62 lines

@@ -0,0 +1,62 @@
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  template:
    metadata:
      labels:
        app: consul
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - consul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.2.2"
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=consul-0.consul.$(NAMESPACE).svc.cluster.local"
        - "-retry-join=consul-1.consul.$(NAMESPACE).svc.cluster.local"
        - "-retry-join=consul-2.consul.$(NAMESPACE).svc.cluster.local"
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - consul leave
k8s/docker-build.yaml (new file) | 28 lines

@@ -0,0 +1,28 @@
apiVersion: v1
kind: Pod
metadata:
  name: build-image
spec:
  restartPolicy: OnFailure
  containers:
  - name: docker-build
    image: docker
    env:
    - name: REGISTRY_PORT
      value: #"30000"
    command: ["sh", "-c"]
    args:
    - |
      apk add --no-cache git &&
      mkdir /workspace &&
      git clone https://github.com/jpetazzo/container.training /workspace &&
      docker build -t localhost:$REGISTRY_PORT/worker /workspace/dockercoins/worker &&
      docker push localhost:$REGISTRY_PORT/worker
    volumeMounts:
    - name: docker-socket
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-socket
    hostPath:
      path: /var/run/docker.sock
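The `REGISTRY_PORT` value is left for the reader to fill in (the commented-out `"30000"` hints at a NodePort), and the pod pushes to `localhost:$REGISTRY_PORT`, which assumes a registry published on every node through a NodePort service. A hypothetical sketch of such a registry, not part of this changeset:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: registry
  labels:
    app: registry
spec:
  containers:
  - name: registry
    image: registry   # official registry image; listens on port 5000
---
apiVersion: v1
kind: Service
metadata:
  name: registry
spec:
  type: NodePort      # publishes the registry port on every node
  selector:
    app: registry
  ports:
  - port: 5000
    nodePort: 30000   # matches the hinted REGISTRY_PORT value
```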
k8s/efk.yaml (new file) | 222 lines

@@ -0,0 +1,222 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: fluentd
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  verbs:
  - get
  - list
  - watch

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: fluentd
roleRef:
  kind: ClusterRole
  name: fluentd
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: fluentd
  namespace: default

---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: fluentd
  labels:
    k8s-app: fluentd-logging
    version: v1
    kubernetes.io/cluster-service: "true"
spec:
  template:
    metadata:
      labels:
        k8s-app: fluentd-logging
        version: v1
        kubernetes.io/cluster-service: "true"
    spec:
      serviceAccount: fluentd
      serviceAccountName: fluentd
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: fluentd
        image: fluent/fluentd-kubernetes-daemonset:elasticsearch
        env:
        - name: FLUENT_ELASTICSEARCH_HOST
          value: "elasticsearch"
        - name: FLUENT_ELASTICSEARCH_PORT
          value: "9200"
        - name: FLUENT_ELASTICSEARCH_SCHEME
          value: "http"
        # X-Pack Authentication
        # =====================
        - name: FLUENT_ELASTICSEARCH_USER
          value: "elastic"
        - name: FLUENT_ELASTICSEARCH_PASSWORD
          value: "changeme"
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers

---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: null
  generation: 1
  labels:
    run: elasticsearch
  name: elasticsearch
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/elasticsearch
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      run: elasticsearch
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: elasticsearch
    spec:
      containers:
      - image: elasticsearch:5.6.8
        imagePullPolicy: IfNotPresent
        name: elasticsearch
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30

---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    run: elasticsearch
  name: elasticsearch
  selfLink: /api/v1/namespaces/default/services/elasticsearch
spec:
  ports:
  - port: 9200
    protocol: TCP
    targetPort: 9200
  selector:
    run: elasticsearch
  sessionAffinity: None
  type: ClusterIP

---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: null
  generation: 1
  labels:
    run: kibana
  name: kibana
  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/kibana
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      run: kibana
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: kibana
    spec:
      containers:
      - env:
        - name: ELASTICSEARCH_URL
          value: http://elasticsearch:9200/
        image: kibana:5.6.8
        imagePullPolicy: Always
        name: kibana
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30

---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    run: kibana
  name: kibana
  selfLink: /api/v1/namespaces/default/services/kibana
spec:
  externalTrafficPolicy: Cluster
  ports:
  - port: 5601
    protocol: TCP
    targetPort: 5601
  selector:
    run: kibana
  sessionAffinity: None
  type: NodePort
k8s/grant-admin-to-dashboard.yaml (new file) | 14 lines

@@ -0,0 +1,14 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
k8s/haproxy.cfg (new file) | 18 lines

@@ -0,0 +1,18 @@
global
    daemon
    maxconn 256

defaults
    mode tcp
    timeout connect 5000ms
    timeout client 50000ms
    timeout server 50000ms

frontend the-frontend
    bind *:80
    default_backend the-backend

backend the-backend
    server google.com-80 google.com:80 maxconn 32 check
    server bing.com-80 bing.com:80 maxconn 32 check
k8s/haproxy.yaml (new file) | 16 lines

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: haproxy
spec:
  volumes:
  - name: config
    configMap:
      name: haproxy
  containers:
  - name: haproxy
    image: haproxy
    volumeMounts:
    - name: config
      mountPath: /usr/local/etc/haproxy/
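The pod above mounts a ConfigMap named `haproxy` that is not defined in this changeset; presumably it is created from `k8s/haproxy.cfg` (for instance with `kubectl create configmap haproxy --from-file=haproxy.cfg`). A sketch of the equivalent manifest, under that assumption:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: haproxy   # the name referenced by the pod's configMap volume
data:
  haproxy.cfg: |  # mounted as /usr/local/etc/haproxy/haproxy.cfg
    global
        daemon
        maxconn 256
    defaults
        mode tcp
        timeout connect 5000ms
        timeout client 50000ms
        timeout server 50000ms
    frontend the-frontend
        bind *:80
        default_backend the-backend
    backend the-backend
        server google.com-80 google.com:80 maxconn 32 check
        server bing.com-80 bing.com:80 maxconn 32 check
```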
k8s/ingress.yaml (new file) | 14 lines

@@ -0,0 +1,14 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: cheddar
spec:
  rules:
  # A.B.C.D is a placeholder for a node's public IP;
  # nip.io resolves cheddar.A.B.C.D.nip.io back to A.B.C.D.
  - host: cheddar.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: cheddar
          servicePort: 80
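The Service named `cheddar` that this Ingress routes to is not defined in this changeset. A hypothetical backend so the rule resolves; the nginx image and the `app: cheddar` label are illustrative, not from the repository:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: cheddar      # matches serviceName in the Ingress rule
spec:
  selector:
    app: cheddar
  ports:
  - port: 80
---
apiVersion: v1
kind: Pod
metadata:
  name: cheddar
  labels:
    app: cheddar
spec:
  containers:
  - name: web
    image: nginx     # placeholder backend
```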
k8s/kaniko-build.yaml (new file) | 29 lines

@@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
  name: kaniko-build
spec:
  initContainers:
  - name: git-clone
    image: alpine
    command: ["sh", "-c"]
    args:
    - |
      apk add --no-cache git &&
      git clone git://github.com/jpetazzo/container.training /workspace
    volumeMounts:
    - name: workspace
      mountPath: /workspace
  containers:
  - name: build-image
    image: gcr.io/kaniko-project/executor:latest
    args:
    - "--context=/workspace/dockercoins/rng"
    - "--skip-tls-verify"
    - "--destination=registry:5000/rng-kaniko:latest"
    volumeMounts:
    - name: workspace
      mountPath: /workspace
  volumes:
  - name: workspace
k8s/kubernetes-dashboard.yaml (new file) | 167 lines

@@ -0,0 +1,167 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>

# ------------------- Dashboard Secret ------------------- #

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
  - port: 443
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
k8s/netpol-allow-testcurl-for-testweb.yaml (new file) | 14 lines

@@ -0,0 +1,14 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-testcurl-for-testweb
spec:
  podSelector:
    matchLabels:
      run: testweb
  ingress:
  - from:
    - podSelector:
        matchLabels:
          run: testcurl
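This policy selects pods labeled `run: testweb` and admits traffic only from pods labeled `run: testcurl`; labels of that form are what `kubectl run <name>` used to set by default. A sketch of matching test pods, with hypothetical names and images:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: testweb
  labels:
    run: testweb     # matched by the policy's podSelector
spec:
  containers:
  - name: web
    image: nginx
---
apiVersion: v1
kind: Pod
metadata:
  name: testcurl
  labels:
    run: testcurl    # accepted as an ingress source by the policy
spec:
  containers:
  - name: curl
    image: curlimages/curl
    command: ["sleep", "3600"]  # keep the pod alive so we can exec curl in it
```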
k8s/netpol-deny-all-for-testweb.yaml (new file) | 10 lines

@@ -0,0 +1,10 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: deny-all-for-testweb
spec:
  podSelector:
    matchLabels:
      run: testweb
  ingress: []
k8s/netpol-dockercoins.yaml (new file) | 22 lines

@@ -0,0 +1,22 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: deny-from-other-namespaces
spec:
  podSelector:
    matchLabels:
  ingress:
  - from:
    - podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-webui
spec:
  podSelector:
    matchLabels:
      run: webui
  ingress:
  - from: []
k8s/nginx-with-volume.yaml (new file) | 21 lines

@@ -0,0 +1,21 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-volume
spec:
  volumes:
  - name: www
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
  - name: git
    image: alpine
    command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
    volumeMounts:
    - name: www
      mountPath: /www/
  restartPolicy: OnFailure
580
k8s/portworx.yaml
Normal file
580
k8s/portworx.yaml
Normal file
@@ -0,0 +1,580 @@
|
||||
# SOURCE: https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop0&c=px-workshop&stork=true&lh=true
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: stork-config
|
||||
namespace: kube-system
|
||||
data:
|
||||
policy.cfg: |-
|
||||
{
|
||||
"kind": "Policy",
|
||||
"apiVersion": "v1",
|
||||
"extenders": [
|
||||
{
|
||||
"urlPrefix": "http://stork-service.kube-system.svc:8099",
|
||||
"apiVersion": "v1beta1",
|
||||
"filterVerb": "filter",
|
||||
"prioritizeVerb": "prioritize",
|
||||
"weight": 5,
|
||||
"enableHttps": false,
|
||||
"nodeCacheCapable": false
|
||||
}
|
||||
]
|
||||
}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: stork-account
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: stork-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "list", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "create", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["list", "watch", "create", "update", "patch"]
|
||||
- apiGroups: ["apiextensions.k8s.io"]
|
||||
resources: ["customresourcedefinitions"]
|
||||
verbs: ["create", "list", "watch", "delete"]
|
||||
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
|
||||
resources: ["volumesnapshots"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
||||
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
|
||||
resources: ["volumesnapshotdatas"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["get", "create", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["services"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["*"]
|
||||
resources: ["deployments", "deployments/extensions"]
|
||||
verbs: ["list", "get", "watch", "patch", "update", "initialize"]
|
||||
- apiGroups: ["*"]
|
||||
resources: ["statefulsets", "statefulsets/extensions"]
|
||||
verbs: ["list", "get", "watch", "patch", "update", "initialize"]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: stork-role-binding
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: stork-account
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: stork-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: stork-service
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
name: stork
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 8099
|
||||
targetPort: 8099
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ""
|
||||
labels:
|
||||
tier: control-plane
|
||||
name: stork
|
||||
namespace: kube-system
|
||||
spec:
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
replicas: 3
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ""
|
||||
labels:
|
||||
name: stork
|
||||
tier: control-plane
|
||||
spec:
|
||||
containers:
|
||||
- command:
|
||||
- /stork
|
||||
- --driver=pxd
|
||||
- --verbose
|
||||
- --leader-elect=true
|
||||
- --health-monitor-interval=120
|
||||
imagePullPolicy: Always
|
||||
image: openstorage/stork:1.1.3
|
||||
resources:
|
||||
requests:
|
||||
cpu: '0.1'
|
||||
name: stork
|
||||
hostPID: false
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: "name"
|
||||
operator: In
|
||||
values:
|
||||
- stork
|
||||
topologyKey: "kubernetes.io/hostname"
|
||||
serviceAccountName: stork-account
|
||||
---
|
||||
kind: StorageClass
|
||||
apiVersion: storage.k8s.io/v1
|
||||
metadata:
|
||||
name: stork-snapshot-sc
|
||||
provisioner: stork-snapshot
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: stork-scheduler-account
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: stork-scheduler-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["endpoints"]
|
||||
verbs: ["get", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["create", "patch", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["endpoints"]
|
||||
verbs: ["create"]
|
||||
- apiGroups: [""]
|
||||
resourceNames: ["kube-scheduler"]
|
||||
resources: ["endpoints"]
|
||||
verbs: ["delete", "get", "patch", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["delete", "get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["bindings", "pods/binding"]
|
||||
verbs: ["create"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods/status"]
|
||||
verbs: ["patch", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["replicationcontrollers", "services"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["app", "extensions"]
|
||||
resources: ["replicasets"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["statefulsets"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["policy"]
|
||||
resources: ["poddisruptionbudgets"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims", "persistentvolumes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: stork-scheduler-role-binding
subjects:
- kind: ServiceAccount
  name: stork-scheduler-account
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: stork-scheduler-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  labels:
    component: scheduler
    tier: control-plane
    name: stork-scheduler
  name: stork-scheduler
  namespace: kube-system
spec:
  replicas: 3
  template:
    metadata:
      labels:
        component: scheduler
        tier: control-plane
        name: stork-scheduler
    spec:
      containers:
      - command:
        - /usr/local/bin/kube-scheduler
        - --address=0.0.0.0
        - --leader-elect=true
        - --scheduler-name=stork
        - --policy-configmap=stork-config
        - --policy-configmap-namespace=kube-system
        - --lock-object-name=stork-scheduler
        image: gcr.io/google_containers/kube-scheduler-amd64:v1.11.2
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10251
          initialDelaySeconds: 15
        name: stork-scheduler
        readinessProbe:
          httpGet:
            path: /healthz
            port: 10251
        resources:
          requests:
            cpu: '0.1'
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "name"
                operator: In
                values:
                - stork-scheduler
            topologyKey: "kubernetes.io/hostname"
      hostPID: false
      serviceAccountName: stork-scheduler-account
---
kind: Service
apiVersion: v1
metadata:
  name: portworx-service
  namespace: kube-system
  labels:
    name: portworx
spec:
  selector:
    name: portworx
  ports:
  - name: px-api
    protocol: TCP
    port: 9001
    targetPort: 9001
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: px-account
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-get-put-list-role
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["watch", "get", "update", "list"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["delete", "get", "list"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims", "persistentvolumes"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "list", "update", "create"]
  - apiGroups: ["extensions"]
    resources: ["podsecuritypolicies"]
    resourceNames: ["privileged"]
    verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-role-binding
subjects:
- kind: ServiceAccount
  name: px-account
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: node-get-put-list-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Namespace
metadata:
  name: portworx
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-role
  namespace: portworx
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-role-binding
  namespace: portworx
subjects:
- kind: ServiceAccount
  name: px-account
  namespace: kube-system
roleRef:
  kind: Role
  name: px-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: portworx
  namespace: kube-system
  annotations:
    portworx.com/install-source: "https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop0&c=px-workshop&stork=true&lh=true"
spec:
  minReadySeconds: 0
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        name: portworx
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: px/enabled
                operator: NotIn
                values:
                - "false"
              - key: node-role.kubernetes.io/master
                operator: DoesNotExist
      hostNetwork: true
      hostPID: false
      containers:
      - name: portworx
        image: portworx/oci-monitor:1.4.2.2
        imagePullPolicy: Always
        args:
          ["-c", "px-workshop", "-s", "/dev/loop0", "-b",
           "-x", "kubernetes"]
        env:
        - name: "PX_TEMPLATE_VERSION"
          value: "v4"

        livenessProbe:
          periodSeconds: 30
          initialDelaySeconds: 840 # allow image pull in slow networks
          httpGet:
            host: 127.0.0.1
            path: /status
            port: 9001
        readinessProbe:
          periodSeconds: 10
          httpGet:
            host: 127.0.0.1
            path: /health
            port: 9015
        terminationMessagePath: "/tmp/px-termination-log"
        securityContext:
          privileged: true
        volumeMounts:
        - name: dockersock
          mountPath: /var/run/docker.sock
        - name: etcpwx
          mountPath: /etc/pwx
        - name: optpwx
          mountPath: /opt/pwx
        - name: proc1nsmount
          mountPath: /host_proc/1/ns
        - name: sysdmount
          mountPath: /etc/systemd/system
        - name: diagsdump
          mountPath: /var/cores
        - name: journalmount1
          mountPath: /var/run/log
          readOnly: true
        - name: journalmount2
          mountPath: /var/log
          readOnly: true
        - name: dbusmount
          mountPath: /var/run/dbus
      restartPolicy: Always
      serviceAccountName: px-account
      volumes:
      - name: dockersock
        hostPath:
          path: /var/run/docker.sock
      - name: etcpwx
        hostPath:
          path: /etc/pwx
      - name: optpwx
        hostPath:
          path: /opt/pwx
      - name: proc1nsmount
        hostPath:
          path: /proc/1/ns
      - name: sysdmount
        hostPath:
          path: /etc/systemd/system
      - name: diagsdump
        hostPath:
          path: /var/cores
      - name: journalmount1
        hostPath:
          path: /var/run/log
      - name: journalmount2
        hostPath:
          path: /var/log
      - name: dbusmount
        hostPath:
          path: /var/run/dbus
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: px-lh-account
  namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-lh-role
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "create", "update"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-lh-role-binding
  namespace: kube-system
subjects:
- kind: ServiceAccount
  name: px-lh-account
  namespace: kube-system
roleRef:
  kind: Role
  name: px-lh-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
  name: px-lighthouse
  namespace: kube-system
  labels:
    tier: px-web-console
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    nodePort: 32678
  - name: https
    port: 443
    nodePort: 32679
  selector:
    tier: px-web-console
---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: px-lighthouse
  namespace: kube-system
  labels:
    tier: px-web-console
spec:
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  selector:
    matchLabels:
      tier: px-web-console
  replicas: 1
  template:
    metadata:
      labels:
        tier: px-web-console
    spec:
      initContainers:
      - name: config-init
        image: portworx/lh-config-sync:0.2
        imagePullPolicy: Always
        args:
        - "init"
        volumeMounts:
        - name: config
          mountPath: /config/lh
      containers:
      - name: px-lighthouse
        image: portworx/px-lighthouse:1.5.0
        imagePullPolicy: Always
        ports:
        - containerPort: 80
        - containerPort: 443
        volumeMounts:
        - name: config
          mountPath: /config/lh
      - name: config-sync
        image: portworx/lh-config-sync:0.2
        imagePullPolicy: Always
        args:
        - "sync"
        volumeMounts:
        - name: config
          mountPath: /config/lh
      serviceAccountName: px-lh-account
      volumes:
      - name: config
        emptyDir: {}
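Once applied, a quick way to check that everything above came up is to query the objects the manifests create (a sketch; the label and object names match the manifests above):

```
kubectl -n kube-system get pods -l name=portworx
kubectl -n kube-system get deployment stork-scheduler px-lighthouse
```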
k8s/postgres.yaml (new file)
@@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgres
spec:
  selector:
    matchLabels:
      app: postgres
  serviceName: postgres
  template:
    metadata:
      labels:
        app: postgres
    spec:
      schedulerName: stork
      containers:
      - name: postgres
        image: postgres:10.5
        volumeMounts:
        - mountPath: /var/lib/postgresql
          name: postgres
  volumeClaimTemplates:
  - metadata:
      name: postgres
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 1Gi
k8s/registry.yaml (new file)
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: registry
spec:
  containers:
  - name: registry
    image: registry
    env:
    - name: REGISTRY_HTTP_ADDR
      valueFrom:
        configMapKeyRef:
          name: registry
          key: http.addr
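This pod reads its listen address from a ConfigMap named `registry`, which is not part of this diff; a minimal sketch of creating it (the `0.0.0.0:5000` address is an assumption, not something this diff specifies):

```
kubectl create configmap registry --from-literal=http.addr=0.0.0.0:5000
```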
k8s/socat.yaml (new file)
@@ -0,0 +1,67 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "2"
  creationTimestamp: null
  generation: 1
  labels:
    run: socat
  name: socat
  namespace: kube-system
  selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
spec:
  replicas: 1
  selector:
    matchLabels:
      run: socat
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: socat
    spec:
      containers:
      - args:
        - sh
        - -c
        - apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard:443,verify=0
        image: alpine
        imagePullPolicy: Always
        name: socat
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status: {}
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    run: socat
  name: socat
  namespace: kube-system
  selfLink: /api/v1/namespaces/kube-system/services/socat
spec:
  externalTrafficPolicy: Cluster
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    run: socat
  sessionAffinity: None
  type: NodePort
status:
  loadBalancer: {}
k8s/storage-class.yaml (new file)
@@ -0,0 +1,11 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: portworx-replicated
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/portworx-volume
parameters:
  repl: "2"
  priority_io: "high"
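Because this class is annotated as the cluster default, a claim does not need to name it explicitly. A minimal sketch of a PVC that would be provisioned by it (the `demo-pvc` name is hypothetical; the postgres StatefulSet above gets its volumes the same way, through its `volumeClaimTemplates`):

```
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-pvc
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 1Gi
```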
k8s/traefik.yaml (new file)
@@ -0,0 +1,100 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
        name: traefik-ingress-lb
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      hostNetwork: true
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: traefik
        name: traefik-ingress-lb
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: admin
          containerPort: 8080
          hostPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
  - protocol: TCP
    port: 80
    name: web
  - protocol: TCP
    port: 8080
    name: admin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
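The controller above watches `Ingress` objects in the `extensions` API group, per its ClusterRole. A minimal sketch of an Ingress it would pick up (the host and service names are hypothetical, not part of this diff):

```
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: demo-ingress
spec:
  rules:
  - host: demo.example.com
    http:
      paths:
      - backend:
          serviceName: demo-service
          servicePort: 80
```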
@@ -1,15 +1,22 @@
-# Trainer tools to create and prepare VMs for Docker workshops on AWS
+# Trainer tools to create and prepare VMs for Docker workshops on AWS or Azure

 ## Prerequisites

 - [Docker](https://docs.docker.com/engine/installation/)
 - [Docker Compose](https://docs.docker.com/compose/install/)
 - [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`) - the configuration scripts require this

 And if you want to generate printable cards:

 - [pyyaml](https://pypi.python.org/pypi/PyYAML) (on a Mac: `brew install pyyaml`)
 - [jinja2](https://pypi.python.org/pypi/Jinja2) (on a Mac: `brew install jinja2`)

 ## General Workflow

 - fork/clone repo
-- set required environment variables for AWS
+- set required environment variables
 - create your own settings file from `settings/example.yaml`
 - if necessary, increase allowed open files: `ulimit -Sn 10000`
 - run `./workshopctl` commands to create instances, install Docker, set up each user's environment in node1, and other management tasks
 - run the `./workshopctl cards` command to generate a PDF for printing handouts with each user's host IPs and login info
@@ -35,6 +42,16 @@ The Docker Compose file here is used to build an image with all the dependencies
 - `AWS_SECRET_ACCESS_KEY`
 - `AWS_DEFAULT_REGION`

+If you're not using AWS, set these to placeholder values:
+
+```
+export AWS_ACCESS_KEY_ID="foo"
+export AWS_SECRET_ACCESS_KEY="foo"
+export AWS_DEFAULT_REGION="foo"
+```
+
+If you don't have the `aws` CLI installed, you will get a warning that it's a missing dependency. If you're not using AWS, you can ignore this.
+
 ### Update/copy `settings/example.yaml`

 Then pass `settings/YOUR_WORKSHOP_NAME-settings.yaml` as an argument to `./workshopctl deploy`, `./workshopctl cards`, etc.
@@ -48,6 +65,7 @@ workshopctl - the orchestration workshop swiss army knife

 Commands:
 ami          Show the AMI that will be used for deployment
 amis         List Ubuntu AMIs in the current region
 build        Build the Docker image to run this program in a container
 cards        Generate ready-to-print cards for a batch of VMs
 deploy       Install Docker on a bunch of running VMs
 ec2quotas    Check our EC2 quotas (max instances)
@@ -55,6 +73,7 @@ help Show available commands
 ids          List the instance IDs belonging to a given tag or token
 ips          List the IP addresses of the VMs for a given tag or token
 kube         Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
+kubetest     Check that all nodes are reporting as Ready
 list         List available batches in the current region
 opensg       Open the default security group to ALL ingress traffic
 pull_images  Pre-pull a bunch of Docker images
@@ -63,6 +82,7 @@ start Start a batch of VMs
 status       List instance status for a given batch
 stop         Stop (terminate, shutdown, kill, remove, destroy...) instances
 test         Run tests (pre-flight checks) on a batch of VMs
 wrap         Run this program in a container
 ```

### Summary of What `./workshopctl` Does For You

@@ -73,21 +93,82 @@ test Run tests (pre-flight checks) on a batch of VMs

 - The `./workshopctl` script can be executed directly.
 - It will run locally if all its dependencies are fulfilled; otherwise it will run in the Docker container you created with `docker-compose build` (preparevms_prepare-vms).
 - During `start` it will add your default local SSH key to all instances under the `ubuntu` user.
-- During `deploy` it will create the `docker` user with password `training`, which is printed on the cards for students. For now, this is hard coded.
+- During `deploy` it will create the `docker` user with password `training`, which is printed on the cards for students. This can be configured with the `docker_user_password` property in the settings file.

-### Example Steps to Launch a Batch of Instances for a Workshop
+### Example Steps to Launch a Batch of AWS Instances for a Workshop

 - Run `./workshopctl start N` to create `N` EC2 instances
   - Your local SSH key will be synced to instances under the `ubuntu` user
   - AWS instances will be created and tagged based on date, with IPs stored in `prepare-vms/tags/`
-- Run `./workshopctl deploy TAG settings/somefile.yaml` to run `scripts/postprep.rc` via parallel-ssh
+- Run `./workshopctl deploy TAG settings/somefile.yaml` to run `lib/postprep.py` via parallel-ssh
   - If it errors or times out, you should be able to rerun
   - Requires a good connection to run all the parallel SSH connections, up to 100 in parallel (ProTip: create a dedicated management instance in the same AWS region where you run all these utilities)
-- Run `./workshopctl pull-images TAG` to pre-pull a bunch of Docker images to the instances
+- Run `./workshopctl pull_images TAG` to pre-pull a bunch of Docker images to the instances
 - Run `./workshopctl cards TAG settings/somefile.yaml` to generate PDF/HTML files to print, cut, and hand out to students
 - *Have a great workshop*
 - Run `./workshopctl stop TAG` to terminate instances.

+### Example Steps to Launch Azure Instances
+
+- Install the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and authenticate with a valid account
+- Customize `azuredeploy.parameters.json`
+  - Required:
+    - Provide the SSH public key you plan to use for instance configuration
+  - Optional:
+    - Choose a name for the workshop (default is "workshop")
+    - Choose the number of instances (default is 3)
+    - Customize the desired instance size (default is Standard_D1_v2)
+- Launch instances with your chosen resource group name and your preferred region; the examples are "workshop" and "eastus":
+
+```
+az group create --name workshop --location eastus
+az group deployment create --resource-group workshop --template-file azuredeploy.json --parameters @azuredeploy.parameters.json
+```
+
+The `az group deployment create` command can take several minutes and will only say `- Running ..` until it completes, unless you increase the verbosity with `--verbose` or `--debug`.
+
+To display the IPs of the instances you've launched:
+
+```
+az vm list-ip-addresses --resource-group workshop --output table
+```
+
+If you want to put the IPs into `prepare-vms/tags/<tag>/ips.txt` for a tag of "myworkshop":
+
+1) If you haven't yet installed `jq` and/or created your event's tags directory in `prepare-vms`:
+
+```
+brew install jq
+mkdir -p tags/myworkshop
+```
+
+2) And then generate the IP list:
+
+```
+az vm list-ip-addresses --resource-group workshop --output json | jq -r '.[].virtualMachine.network.publicIpAddresses[].ipAddress' > tags/myworkshop/ips.txt
+```
+
+After the workshop is over, remove the instances:
+
+```
+az group delete --resource-group workshop
+```
+
+### Example Steps to Configure Instances from a non-AWS Source
+
+- Launch instances via your preferred method. You'll need to get the instance IPs and be able to SSH into them.
+- Set placeholder values for [AWS environment variable settings](#required-environment-variables).
+- Choose a tag. It could be an event name, datestamp, etc. Ensure you have created a directory for your tag: `prepare-vms/tags/<tag>/`
+- If you have not already generated a file with the IPs to be configured:
+  - The file should be named `prepare-vms/tags/<tag>/ips.txt`
+  - The format is one IP per line, no other info needed (see the example after this list).
+- Ensure the settings file is as desired (especially the number of nodes): `prepare-vms/settings/kube101.yaml`
+- For a tag called `myworkshop`, configure instances: `workshopctl deploy myworkshop settings/kube101.yaml`
+- Optionally, configure Kubernetes clusters of the size in the settings: `workshopctl kube myworkshop`
+- Optionally, test your Kubernetes clusters. They may take a little time to become ready: `workshopctl kubetest myworkshop`
+- Generate cards to print and hand out: `workshopctl cards myworkshop settings/kube101.yaml`
+- Print the cards file: `prepare-vms/tags/myworkshop/ips.html`

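A minimal `ips.txt` sketch (the addresses are made up):

```
203.0.113.10
203.0.113.11
203.0.113.12
```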
 ## Other Tools

 ### Deploying your SSH key to all the machines

@@ -97,13 +178,6 @@ test Run tests (pre-flight checks) on a batch of VMs
 - Run `pcopykey`.

-### Installing extra packages
-
-- Source `postprep.rc`.
-  (This will install a few extra packages, add entries to
-  /etc/hosts, generate SSH keys, and deploy them on all hosts.)
-
 ## Even More Details

 #### Sync of SSH keys
@@ -132,16 +206,20 @@ Instances can be deployed manually using the `deploy` command:

     $ ./workshopctl deploy TAG settings/somefile.yaml

-The `postprep.rc` file will be copied via parallel-ssh to all of the VMs and executed.
+The `postprep.py` file will be copied via parallel-ssh to all of the VMs and executed.

 #### Pre-pull images

-    $ ./workshopctl pull-images TAG
+    $ ./workshopctl pull_images TAG

 #### Generate cards

     $ ./workshopctl cards TAG settings/somefile.yaml

+If you want to generate both HTML and PDF cards, install [wkhtmltopdf](https://wkhtmltopdf.org/downloads.html); without that installed, only HTML cards will be generated.
+
+If you don't have `wkhtmltopdf` installed, you will get a warning that it is a missing dependency. If you plan to just print the HTML cards, you can ignore this.
+
 #### List tags

     $ ./workshopctl list

prepare-vms/azuredeploy.json (new file)
@@ -0,0 +1,250 @@
{
  "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {
    "workshopName": {
      "type": "string",
      "defaultValue": "workshop",
      "metadata": {
        "description": "Workshop name."
      }
    },
    "vmPrefix": {
      "type": "string",
      "defaultValue": "node",
      "metadata": {
        "description": "Prefix for VM names."
      }
    },
    "numberOfInstances": {
      "type": "int",
      "defaultValue": 3,
      "metadata": {
        "description": "Number of VMs to create."
      }
    },
    "adminUsername": {
      "type": "string",
      "defaultValue": "ubuntu",
      "metadata": {
        "description": "Admin username for VMs."
      }
    },
    "sshKeyData": {
      "type": "string",
      "defaultValue": "",
      "metadata": {
        "description": "SSH rsa public key file as a string."
      }
    },
    "imagePublisher": {
      "type": "string",
      "defaultValue": "Canonical",
      "metadata": {
        "description": "OS image publisher; default Canonical."
      }
    },
    "imageOffer": {
      "type": "string",
      "defaultValue": "UbuntuServer",
      "metadata": {
        "description": "The name of the image offer. The default is Ubuntu"
      }
    },
    "imageSKU": {
      "type": "string",
      "defaultValue": "16.04-LTS",
      "metadata": {
        "description": "Version of the image. The default is 16.04-LTS"
      }
    },
    "vmSize": {
      "type": "string",
      "defaultValue": "Standard_D1_v2",
      "metadata": {
        "description": "VM Size."
      }
    }
  },
  "variables": {
    "vnetID": "[resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName'))]",
    "subnet1Ref": "[concat(variables('vnetID'),'/subnets/',variables('subnet1Name'))]",
    "vmName": "[parameters('vmPrefix')]",
    "sshKeyPath": "[concat('/home/',parameters('adminUsername'),'/.ssh/authorized_keys')]",
    "publicIPAddressName": "PublicIP",
    "publicIPAddressType": "Dynamic",
    "virtualNetworkName": "MyVNET",
    "netSecurityGroup": "MyNSG",
    "addressPrefix": "10.0.0.0/16",
    "subnet1Name": "subnet-1",
    "subnet1Prefix": "10.0.0.0/24",
    "nicName": "myVMNic"
  },
  "resources": [
    {
      "apiVersion": "2017-11-01",
      "type": "Microsoft.Network/publicIPAddresses",
      "name": "[concat(variables('publicIPAddressName'),copyIndex(1))]",
      "location": "[resourceGroup().location]",
      "copy": {
        "name": "publicIPLoop",
        "count": "[parameters('numberOfInstances')]"
      },
      "properties": {
        "publicIPAllocationMethod": "[variables('publicIPAddressType')]"
      },
      "tags": {
        "workshop": "[parameters('workshopName')]"
      }
    },
    {
      "apiVersion": "2017-11-01",
      "type": "Microsoft.Network/virtualNetworks",
      "name": "[variables('virtualNetworkName')]",
      "location": "[resourceGroup().location]",
      "dependsOn": [
        "[concat('Microsoft.Network/networkSecurityGroups/', variables('netSecurityGroup'))]"
      ],
      "properties": {
        "addressSpace": {
          "addressPrefixes": [
            "[variables('addressPrefix')]"
          ]
        },
        "subnets": [
          {
            "name": "[variables('subnet1Name')]",
            "properties": {
              "addressPrefix": "[variables('subnet1Prefix')]",
              "networkSecurityGroup": {
                "id": "[resourceId('Microsoft.Network/networkSecurityGroups', variables('netSecurityGroup'))]"
              }
            }
          }
        ]
      },
      "tags": {
        "workshop": "[parameters('workshopName')]"
      }
    },
    {
      "apiVersion": "2017-11-01",
      "type": "Microsoft.Network/networkInterfaces",
      "name": "[concat(variables('nicName'),copyIndex(1))]",
      "location": "[resourceGroup().location]",
      "copy": {
        "name": "nicLoop",
        "count": "[parameters('numberOfInstances')]"
      },
      "dependsOn": [
        "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'),copyIndex(1))]",
        "[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]"
      ],
      "properties": {
        "ipConfigurations": [
          {
            "name": "ipconfig1",
            "properties": {
              "privateIPAllocationMethod": "Dynamic",
              "publicIPAddress": {
                "id": "[resourceId('Microsoft.Network/publicIPAddresses', concat(variables('publicIPAddressName'), copyIndex(1)))]"
              },
              "subnet": {
                "id": "[variables('subnet1Ref')]"
              }
            }
          }
        ]
      },
      "tags": {
        "workshop": "[parameters('workshopName')]"
      }
    },
    {
      "apiVersion": "2017-12-01",
      "type": "Microsoft.Compute/virtualMachines",
      "name": "[concat(variables('vmName'),copyIndex(1))]",
      "location": "[resourceGroup().location]",
      "copy": {
        "name": "vmLoop",
        "count": "[parameters('numberOfInstances')]"
      },
      "dependsOn": [
        "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'), copyIndex(1))]"
      ],
      "properties": {
        "hardwareProfile": {
          "vmSize": "[parameters('vmSize')]"
        },
        "osProfile": {
          "computerName": "[concat(variables('vmName'),copyIndex(1))]",
          "adminUsername": "[parameters('adminUsername')]",
          "linuxConfiguration": {
            "disablePasswordAuthentication": true,
            "ssh": {
              "publicKeys": [
                {
                  "path": "[variables('sshKeyPath')]",
                  "keyData": "[parameters('sshKeyData')]"
                }
              ]
            }
          }
        },
        "storageProfile": {
          "osDisk": {
            "createOption": "FromImage"
          },
          "imageReference": {
            "publisher": "[parameters('imagePublisher')]",
            "offer": "[parameters('imageOffer')]",
            "sku": "[parameters('imageSKU')]",
            "version": "latest"
          }
        },
        "networkProfile": {
          "networkInterfaces": [
            {
              "id": "[resourceId('Microsoft.Network/networkInterfaces', concat(variables('nicName'),copyIndex(1)))]"
            }
          ]
        }
      },
      "tags": {
        "workshop": "[parameters('workshopName')]"
      }
    },
    {
      "apiVersion": "2017-11-01",
      "type": "Microsoft.Network/networkSecurityGroups",
      "name": "[variables('netSecurityGroup')]",
      "location": "[resourceGroup().location]",
      "tags": {
        "workshop": "[parameters('workshopName')]"
      },
      "properties": {
        "securityRules": [
          {
            "name": "default-open-ports",
            "properties": {
              "protocol": "Tcp",
              "sourcePortRange": "*",
              "destinationPortRange": "*",
              "sourceAddressPrefix": "*",
              "destinationAddressPrefix": "*",
              "access": "Allow",
              "priority": 1000,
              "direction": "Inbound"
            }
          }
        ]
      }
    }
  ],
  "outputs": {
    "resourceID": {
      "type": "string",
      "value": "[resourceId('Microsoft.Network/publicIPAddresses', concat(variables('publicIPAddressName'),'1'))]"
    }
  }
}
prepare-vms/azuredeploy.parameters.json (new file)
@@ -0,0 +1,18 @@
{
  "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {
    "sshKeyData": {
      "value": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDXTIl/M9oeSlcsC5Rfe+nZr4Jc4sl200pSw2lpdxlZ3xzeP15NgSSMJnigUrKUXHfqRQ+2wiPxEf0Odz2GdvmXvR0xodayoOQsO24AoERjeSBXCwqITsfp1bGKzMb30/3ojRBo6LBR6r1+lzJYnNCGkT+IQwLzRIpm0LCNz1j08PUI2aZ04+mcDANvHuN/hwi/THbLLp6SNWN43m9r02RcC6xlCNEhJi4wk4VzMzVbSv9RlLGST2ocbUHwmQ2k9OUmpzoOx73aQi9XNnEaFh2w/eIdXM75VtkT3mRryyykg9y0/hH8/MVmIuRIdzxHQqlm++DLXVH5Ctw6a4kS+ki7 workshop"
    },
    "workshopName": {
      "value": "workshop"
    },
    "numberOfInstances": {
      "value": 3
    },
    "vmSize": {
      "value": "Standard_D1_v2"
    }
  }
}
@@ -12,7 +12,9 @@
 {%- set cluster_or_machine = "cluster" -%}
 {%- set this_or_each = "each" -%}
 {%- set machine_is_or_machines_are = "machines are" -%}
-{%- set image_src = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
+{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
+{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
+{%- set image_src = image_src_swarm -%}
 {%- endif -%}
 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
 <html>
@@ -83,7 +85,7 @@ img {
 <tr><td>login:</td></tr>
 <tr><td class="logpass">docker</td></tr>
 <tr><td>password:</td></tr>
-<tr><td class="logpass">training</td></tr>
+<tr><td class="logpass">{{ docker_user_password }}</td></tr>
 </table>

 </p>

prepare-vms/clusters.csv (new file)
@@ -0,0 +1,5 @@
Put your initials in the first column to "claim" a cluster.
Initials{% for node in clusters[0] %} node{{ loop.index }}{% endfor %}
{% for cluster in clusters -%}
{%- for node in cluster %} {{ node|trim }}{% endfor %}
{% endfor %}
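For `clustersize: 5`, this Jinja2 template renders something like the following (addresses made up):

```
Put your initials in the first column to "claim" a cluster.
Initials node1 node2 node3 node4 node5
 203.0.113.10 203.0.113.11 203.0.113.12 203.0.113.13 203.0.113.14
```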
prepare-vms/cncsetup.sh (new file)
@@ -0,0 +1,21 @@
#!/bin/sh
if [ $(whoami) != ubuntu ]; then
  echo "This script should be executed on a freshly deployed node,"
  echo "with the 'ubuntu' user. Aborting."
  exit 1
fi
if id docker; then
  sudo userdel -r docker
fi
pip install --user awscli jinja2 pdfkit
sudo apt-get install -y wkhtmltopdf xvfb
tmux new-session \; send-keys "
[ -f ~/.ssh/id_rsa ] || ssh-keygen

eval \$(ssh-agent)
ssh-add
Xvfb :0 &
export DISPLAY=:0
mkdir -p ~/www
sudo docker run -d -p 80:80 -v \$HOME/www:/usr/share/nginx/html nginx
"
@@ -7,7 +7,6 @@ services:
     working_dir: /root/prepare-vms
     volumes:
       - $HOME/.aws/:/root/.aws/
-      - /etc/localtime:/etc/localtime:ro
       - $SSH_AUTH_SOCK:$SSH_AUTH_SOCK
       - $PWD/:/root/prepare-vms/
     environment:
@@ -15,5 +14,6 @@ services:
       AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID}
       AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY}
       AWS_DEFAULT_REGION: ${AWS_DEFAULT_REGION}
+      AWS_INSTANCE_TYPE: ${AWS_INSTANCE_TYPE}
       USER: ${USER}
     entrypoint: /root/prepare-vms/workshopctl

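The new `AWS_INSTANCE_TYPE` pass-through pairs with the `${AWS_INSTANCE_TYPE-t2.medium}` default used by the `start` command later in this diff; a usage sketch (the instance type and count are examples, not requirements):

```
export AWS_INSTANCE_TYPE=m5.large
./workshopctl start 50
```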
@@ -2,7 +2,7 @@
 _ERR() {
     error "Command $BASH_COMMAND failed (exit status: $?)"
 }
-set -e
+set -eE
 trap _ERR ERR

 die() {

@@ -39,13 +39,16 @@ _cmd_cards() {
     need_tag $TAG
     need_settings $SETTINGS

-    aws_get_instance_ips_by_tag $TAG >tags/$TAG/ips.txt
+    # If you're not using AWS, populate the ips.txt file manually
+    if [ ! -f tags/$TAG/ips.txt ]; then
+        aws_get_instance_ips_by_tag $TAG >tags/$TAG/ips.txt
+    fi

     # Remove symlinks to old cards
     rm -f ips.html ips.pdf

     # This will generate two files in the base dir: ips.pdf and ips.html
-    python lib/ips-txt-to-html.py $SETTINGS
+    lib/ips-txt-to-html.py $SETTINGS

     for f in ips.html ips.pdf; do
         # Remove old versions of cards if they exist
@@ -124,27 +127,21 @@ _cmd kube "Setup kubernetes clusters with kubeadm (must be run AFTER deploy)"
 _cmd_kube() {

     # Install packages
-    pssh "
+    pssh --timeout 200 "
     curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg |
     sudo apt-key add - &&
     echo deb http://apt.kubernetes.io/ kubernetes-xenial main |
     sudo tee /etc/apt/sources.list.d/kubernetes.list"
-    pssh "
+    pssh --timeout 200 "
     sudo apt-get update -q &&
     sudo apt-get install -qy kubelet kubeadm kubectl
     kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl"

-    # Work around https://github.com/kubernetes/kubernetes/issues/53356
-    pssh "
-    if [ ! -f /etc/kubernetes/kubelet.conf ]; then
-      sudo systemctl stop kubelet
-      sudo rm -rf /var/lib/kubelet/pki
-    fi"
-
     # Initialize kube master
-    pssh "
+    pssh --timeout 200 "
     if grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/admin.conf ]; then
-      sudo kubeadm init
+      kubeadm token generate > /tmp/token
+      sudo kubeadm init --token \$(cat /tmp/token)
     fi"

     # Put kubeconfig in ubuntu's and docker's accounts
@@ -157,15 +154,6 @@ _cmd_kube() {
         sudo chown -R docker /home/docker/.kube
     fi"

-    # Get bootstrap token
-    pssh "
-    if grep -q node1 /tmp/node; then
-      TOKEN_NAME=\$(kubectl -n kube-system get secret -o name | grep bootstrap-token)
-      TOKEN_ID=\$(kubectl -n kube-system get \$TOKEN_NAME -o go-template --template '{{ index .data \"token-id\" }}' | base64 -d)
-      TOKEN_SECRET=\$(kubectl -n kube-system get \$TOKEN_NAME -o go-template --template '{{ index .data \"token-secret\" }}' | base64 -d)
-      echo \$TOKEN_ID.\$TOKEN_SECRET >/tmp/token
-    fi"
-
     # Install weave as the pod network
     pssh "
     if grep -q node1 /tmp/node; then
@@ -174,15 +162,46 @@ _cmd_kube() {
     fi"

     # Join the other nodes to the cluster
-    pssh "
+    pssh --timeout 200 "
     if ! grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
       TOKEN=\$(ssh -o StrictHostKeyChecking=no node1 cat /tmp/token)
-      sudo kubeadm join --token \$TOKEN node1:6443
+      sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN node1:6443
     fi"

+    # Install stern
+    pssh "
+    if [ ! -x /usr/local/bin/stern ]; then
+      sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.8.0/stern_linux_amd64
+      sudo chmod +x /usr/local/bin/stern
+      stern --completion bash | sudo tee /etc/bash_completion.d/stern
+    fi"
+
+    # Install helm
+    pssh "
+    if [ ! -x /usr/local/bin/helm ]; then
+      curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | sudo bash
+      helm completion bash | sudo tee /etc/bash_completion.d/helm
+    fi"
+
     sep "Done"
 }

+_cmd kubetest "Check that all nodes are reporting as Ready"
+_cmd_kubetest() {
+    # There are way too many backslashes in the command below.
+    # Feel free to make that better ♥
+    pssh "
+    set -e
+    [ -f /tmp/node ]
+    if grep -q node1 /tmp/node; then
+      which kubectl
+      for NODE in \$(awk /\ node/\ {print\ \\\$2} /etc/hosts); do
+        echo \$NODE ; kubectl get nodes | grep -w \$NODE | grep -w Ready
+      done
+    fi"
+}
+
 _cmd ids "List the instance IDs belonging to a given tag or token"
 _cmd_ids() {
     TAG=$1
@@ -280,6 +299,9 @@ _cmd_start() {
     key_name=$(sync_keys)

     AMI=$(_cmd_ami)    # Retrieve the AWS image ID
+    if [ -z "$AMI" ]; then
+        die "I could not find which AMI to use in this region. Try another region?"
+    fi
     TOKEN=$(get_token) # generate a timestamp token for this batch of VMs
     AWS_KEY_NAME=$(make_key_name)

@@ -292,7 +314,7 @@ _cmd_start() {
     result=$(aws ec2 run-instances \
         --key-name $AWS_KEY_NAME \
         --count $COUNT \
-        --instance-type t2.medium \
+        --instance-type ${AWS_INSTANCE_TYPE-t2.medium} \
         --client-token $TOKEN \
         --image-id $AMI)
     reservation_id=$(echo "$result" | head -1 | awk '{print $2}')
@@ -387,9 +409,23 @@ pull_tag() {
         ubuntu:latest \
         fedora:latest \
         centos:latest \
         elasticsearch:2 \
         postgres \
         redis \
         alpine \
         registry \
         nicolaka/netshoot \
         jpetazzo/trainingwheels \
         golang \
         training/namer \
         dockercoins/hasher \
         dockercoins/rng \
         dockercoins/webui \
         dockercoins/worker \
         logstash \
         prom/node-exporter \
         google/cadvisor \
         dockersamples/visualizer \
         nathanleclaire/redisonrails; do
         sudo -u docker docker pull $I
     done'
@@ -430,6 +466,7 @@ tag_is_reachable() {
 }

 test_tag() {
     TAG=$1
     ips_file=tags/$TAG/ips.txt
     info "Picking a random IP address in $ips_file to run tests."
     n=$((1 + $RANDOM % $(wc -l <$ips_file)))

@@ -13,6 +13,7 @@ COMPOSE_VERSION = config["compose_version"]
 MACHINE_VERSION = config["machine_version"]
 CLUSTER_SIZE = config["clustersize"]
 ENGINE_VERSION = config["engine_version"]
+DOCKER_USER_PASSWORD = config["docker_user_password"]

 #################################

@@ -45,7 +46,7 @@ def system(cmd):

 # On EC2, the ephemeral disk might be mounted on /mnt.
 # If /mnt is a mountpoint, place Docker workspace on it.
-system("if mountpoint -q /mnt; then sudo mkdir /mnt/docker && sudo ln -s /mnt/docker /var/lib/docker; fi")
+system("if mountpoint -q /mnt; then sudo mkdir -p /mnt/docker && sudo ln -sfn /mnt/docker /var/lib/docker; fi")

 # Put our public IP in /tmp/ipv4
 # ipv4_retrieval_endpoint = "http://169.254.169.254/latest/meta-data/public-ipv4"
@@ -54,9 +55,9 @@ system("curl --silent {} > /tmp/ipv4".format(ipv4_retrieval_endpoint))

 ipv4 = open("/tmp/ipv4").read()

-# Add a "docker" user with password "training"
+# Add a "docker" user with password coming from the settings
 system("id docker || sudo useradd -d /home/docker -m -s /bin/bash docker")
-system("echo docker:training | sudo chpasswd")
+system("echo docker:{} | sudo chpasswd".format(DOCKER_USER_PASSWORD))

 # Fancy prompt courtesy of @soulshake.
 system("""sudo -u docker tee -a /home/docker/.bashrc <<SQRL
@@ -108,7 +109,7 @@ system("sudo chmod +x /usr/local/bin/docker-machine")
 system("docker-machine version")

 system("sudo apt-get remove -y --purge dnsmasq-base")
-system("sudo apt-get -qy install python-setuptools pssh apache2-utils httping htop unzip mosh")
+system("sudo apt-get -qy install python-setuptools pssh apache2-utils httping htop unzip mosh tree")

 ### Wait for Docker to be up.
 ### (If we don't do this, Docker will not be responsive during the next step.)

prepare-vms/settings/csv.yaml (new file)
@@ -0,0 +1,5 @@
# Number of VMs per cluster
clustersize: 5

# Jinja2 template to use to generate ready-to-cut cards
cards_template: clusters.csv
prepare-vms/settings/example.yaml (new file)
@@ -0,0 +1,27 @@
# customize your cluster size, your cards template, and the versions

# Number of VMs per cluster
clustersize: 5

# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html

# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter

# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in

# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)

# This can be "test" or "stable"
engine_version: test

# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.18.0
machine_version: 0.13.0

# Password used to connect with the "docker user"
docker_user_password: training
@@ -17,8 +17,11 @@ paper_margin: 0.2in
 # (The equivalent parameters must be set from the browser's print dialog.)

 # This can be "test" or "stable"
-engine_version: test
+engine_version: stable

 # These correspond to the version numbers visible on their respective GitHub release pages
-compose_version: 1.16.1
-machine_version: 0.12.0
+compose_version: 1.22.0
+machine_version: 0.15.0
+
+# Password used to connect with the "docker user"
+docker_user_password: training

prepare-vms/settings/kube101.html (new file)
@@ -0,0 +1,106 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://container.training/" -%}
{%- set pagesize = 12 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "Kubernetes workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_kube -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
body, table {
  margin: 0;
  padding: 0;
  line-height: 1em;
  font-size: 14px;
}

table {
  border-spacing: 0;
  margin-top: 0.4em;
  margin-bottom: 0.4em;
  border-left: 0.8em double grey;
  padding-left: 0.4em;
}

div {
  float: left;
  border: 1px dotted black;
  padding-top: 1%;
  padding-bottom: 1%;
  /* columns * (width+left+right) < 100% */
  width: 21.5%;
  padding-left: 1.5%;
  padding-right: 1.5%;
}

p {
  margin: 0.4em 0 0.4em 0;
}

img {
  height: 4em;
  float: right;
  margin-right: -0.4em;
}

.logpass {
  font-family: monospace;
  font-weight: bold;
}

.pagebreak {
  page-break-after: always;
  clear: both;
  display: block;
  height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
{% if loop.index0>0 and loop.index0%pagesize==0 %}
<span class="pagebreak"></span>
{% endif %}
<div>

<p>
Here is the connection information to your very own
{{ cluster_or_machine }} for this {{ workshop_name }}.
You can connect to {{ this_or_each }} VM with any SSH client.
</p>
<p>
<img src="{{ image_src }}" />
<table>
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>

</p>
<p>
Your {{ machine_is_or_machines_are }}:
<table>
{% for node in cluster %}
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
{% endfor %}
</table>
</p>
<p>You can find the slides at:
<center>{{ url }}</center>
</p>
</div>
{% endfor %}
</body>
</html>
prepare-vms/settings/kube101.yaml (new file)
@@ -0,0 +1,27 @@
# 3 nodes for k8s 101 workshops

# Number of VMs per cluster
clustersize: 3

# Jinja2 template to use to generate ready-to-cut cards
cards_template: settings/kube101.html

# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter

# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in

# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)

# This can be "test" or "stable"
engine_version: stable

# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.21.1
machine_version: 0.14.0

# Password used to connect with the "docker user"
docker_user_password: training
@@ -1,7 +1,7 @@
 # This file is passed by trainer-cli to scripts/ips-txt-to-html.py

 # Number of VMs per cluster
-clustersize: 5
+clustersize: 3

 # Jinja2 template to use to generate ready-to-cut cards
 cards_template: cards.html
@@ -17,8 +17,11 @@ paper_margin: 0.2in
 # (The equivalent parameters must be set from the browser's print dialog.)

 # This can be "test" or "stable"
-engine_version: test
+engine_version: stable

 # These correspond to the version numbers visible on their respective GitHub release pages
-compose_version: 1.16.1
-machine_version: 0.12.0
+compose_version: 1.21.1
+machine_version: 0.14.0

 # Password used to connect with the "docker user"
 docker_user_password: training
@@ -38,7 +38,7 @@ check_envvars() {
         if [ -z "${!envvar}" ]; then
             error "Environment variable $envvar is not set."
             if [ "$envvar" = "SSH_AUTH_SOCK" ]; then
-                error "Hint: run '\$(ssh-agent) ; ssh-add' and try again?"
+                error "Hint: run 'eval \$(ssh-agent) ; ssh-add' and try again?"
             fi
             status=1
         fi

slides/_redirects (new file)
@@ -0,0 +1 @@
/ /weka.yml.html 200!
slides/appendcheck.py (new executable file)
@@ -0,0 +1,17 @@
#!/usr/bin/env python

import logging
import os
import subprocess
import sys

logging.basicConfig(level=os.environ.get("LOG_LEVEL", "INFO"))

filename = sys.argv[1]

logging.info("Checking file {}...".format(filename))
text = subprocess.check_output(["./slidechecker.js", filename])
html = open(filename).read()
html = html.replace("</textarea>", "\n---\n```\n{}\n```\n</textarea>".format(text))

open(filename, "w").write(html)
slides/autopilot/autotest.py (new executable file)
@@ -0,0 +1,453 @@
#!/usr/bin/env python
# coding: utf-8

import click
import logging
import os
import random
import re
import select
import subprocess
import sys
import time
import uuid
import yaml


logging.basicConfig(level=os.environ.get("LOG_LEVEL", "INFO"))


TIMEOUT = 60 # 1 minute

# This one is not a constant. It's an ugly global.
IPADDR = None


class State(object):

    def __init__(self):
        self.interactive = True
        self.verify_status = False
        self.simulate_type = True
        self.switch_desktop = False
        self.sync_slides = False
        self.open_links = False
        self.run_hidden = True
        self.slide = 1
        self.snippet = 0

    def load(self):
        data = yaml.load(open("state.yaml"))
        self.interactive = bool(data["interactive"])
        self.verify_status = bool(data["verify_status"])
        self.simulate_type = bool(data["simulate_type"])
        self.switch_desktop = bool(data["switch_desktop"])
        self.sync_slides = bool(data["sync_slides"])
        self.open_links = bool(data["open_links"])
        self.run_hidden = bool(data["run_hidden"])
        self.slide = int(data["slide"])
        self.snippet = int(data["snippet"])

    def save(self):
        with open("state.yaml", "w") as f:
            yaml.dump(dict(
                interactive=self.interactive,
                verify_status=self.verify_status,
                simulate_type=self.simulate_type,
                switch_desktop=self.switch_desktop,
                sync_slides=self.sync_slides,
                open_links=self.open_links,
                run_hidden=self.run_hidden,
                slide=self.slide,
                snippet=self.snippet,
                ), f, default_flow_style=False)


state = State()


def hrule():
    return "="*int(subprocess.check_output(["tput", "cols"]))

# A "snippet" is something that the user is supposed to do in the workshop.
# Most of the "snippets" are shell commands.
# Some of them can be key strokes or other actions.
# In the markdown source, they are the code sections (identified by triple-
# quotes) within .exercise[] sections.

class Snippet(object):

    def __init__(self, slide, content):
        self.slide = slide
        self.content = content
        # Extract the "method" (e.g. bash, keys, ...)
        # On multi-line snippets, the method is alone on the first line
        # On single-line snippets, the data follows the method immediately
        if '\n' in content:
            self.method, self.data = content.split('\n', 1)
        else:
            self.method, self.data = content.split(' ', 1)
        self.data = self.data.strip()
        self.next = None

    def __str__(self):
        return self.content


class Slide(object):

    current_slide = 0

    def __init__(self, content):
        self.number = Slide.current_slide
        Slide.current_slide += 1

        # Remove commented-out slides
        # (remark.js considers ??? to be the separator for speaker notes)
        content = re.split("\n\?\?\?\n", content)[0]
        self.content = content

        self.snippets = []
        exercises = re.findall("\.exercise\[(.*)\]", content, re.DOTALL)
        for exercise in exercises:
            if "```" in exercise:
                previous = None
                for snippet_content in exercise.split("```")[1::2]:
                    snippet = Snippet(self, snippet_content)
                    if previous:
                        previous.next = snippet
                    previous = snippet
                    self.snippets.append(snippet)
            else:
                logging.warning("Exercise on slide {} does not have any ``` snippet."
                                .format(self.number))
                self.debug()

    def __str__(self):
        text = self.content
        for snippet in self.snippets:
            text = text.replace(snippet.content, ansi("7")(snippet.content))
        return text

    def debug(self):
        logging.debug("\n{}\n{}\n{}".format(hrule(), self.content, hrule()))


def focus_slides():
    if not state.switch_desktop:
        return
    subprocess.check_output(["i3-msg", "workspace", "3"])
    subprocess.check_output(["i3-msg", "workspace", "1"])

def focus_terminal():
    if not state.switch_desktop:
        return
    subprocess.check_output(["i3-msg", "workspace", "2"])
    subprocess.check_output(["i3-msg", "workspace", "1"])

def focus_browser():
    if not state.switch_desktop:
        return
    subprocess.check_output(["i3-msg", "workspace", "4"])
    subprocess.check_output(["i3-msg", "workspace", "1"])


def ansi(code):
    return lambda s: "\x1b[{}m{}\x1b[0m".format(code, s)


# Sleeps the indicated delay, but interruptible by pressing ENTER.
# If interrupted, returns True.
def interruptible_sleep(t):
    rfds, _, _ = select.select([0], [], [], t)
    return 0 in rfds


def wait_for_string(s, timeout=TIMEOUT):
    logging.debug("Waiting for string: {}".format(s))
    deadline = time.time() + timeout
    while time.time() < deadline:
        output = capture_pane()
        if s in output:
            return
        if interruptible_sleep(1): return
    raise Exception("Timed out while waiting for {}!".format(s))


def wait_for_prompt():
    logging.debug("Waiting for prompt.")
    deadline = time.time() + TIMEOUT
    while time.time() < deadline:
        output = capture_pane()
        # If we are not at the bottom of the screen, there will be a bunch of extra \n's
        output = output.rstrip('\n')
        last_line = output.split('\n')[-1]
        # Our custom prompt on the VMs has two lines; the 2nd line is just '$'
        if last_line == "$":
            # This is a perfect opportunity to grab the node's IP address
            global IPADDR
            IPADDR = re.findall("^\[(.*)\]", output, re.MULTILINE)[-1]
            return
        # When we are in an alpine container, the prompt will be "/ #"
        if last_line == "/ #":
            return
        # We did not recognize a known prompt; wait a bit and check again
        logging.debug("Could not find a known prompt on last line: {!r}"
                      .format(last_line))
        if interruptible_sleep(1): return
    raise Exception("Timed out while waiting for prompt!")


def check_exit_status():
    if not state.verify_status:
        return
    token = uuid.uuid4().hex
    data = "echo {} $?\n".format(token)
    logging.debug("Sending {!r} to get exit status.".format(data))
    send_keys(data)
    time.sleep(0.5)
    wait_for_prompt()
    screen = capture_pane()
    status = re.findall("\n{} ([0-9]+)\n".format(token), screen, re.MULTILINE)
    logging.debug("Got exit status: {}.".format(status))
    if len(status) == 0:
        raise Exception("Couldn't retrieve status code {}. Timed out?".format(token))
    if len(status) > 1:
        raise Exception("More than one status code {}. I'm seeing double! Shoot them both.".format(token))
    code = int(status[0])
    if code != 0:
        raise Exception("Non-zero exit status: {}.".format(code))
    # Otherwise just return peacefully.


def setup_tmux_and_ssh():
    if subprocess.call(["tmux", "has-session"]):
        logging.error("Couldn't connect to tmux. Please setup tmux first.")
        ipaddr = open("../../prepare-vms/ips.txt").read().split("\n")[0]
        uid = os.getuid()

        raise Exception("""
1. If you're running this directly from a node:

   tmux

2. If you want to control a remote tmux:

   rm -f /tmp/tmux-{uid}/default && ssh -t -L /tmp/tmux-{uid}/default:/tmp/tmux-1001/default docker@{ipaddr} tmux new-session -As 0

3. If you cannot control a remote tmux:

   tmux new-session ssh docker@{ipaddr}
""".format(uid=uid, ipaddr=ipaddr))
    else:
        logging.info("Found tmux session. Trying to acquire shell prompt.")
        wait_for_prompt()
        logging.info("Successfully connected to test cluster in tmux session.")


slides = [Slide("Dummy slide zero")]
content = open(sys.argv[1]).read()

# OK, this part is definitely hackish, and will break if the
# excludedClasses parameter is not on a single line.
excluded_classes = re.findall("excludedClasses: (\[.*\])", content)
excluded_classes = set(eval(excluded_classes[0]))

for slide in re.split("\n---?\n", content):
    slide_classes = re.findall("class: (.*)", slide)
    if slide_classes:
        slide_classes = slide_classes[0].split(",")
        slide_classes = [c.strip() for c in slide_classes]
    if excluded_classes & set(slide_classes):
        logging.info("Skipping excluded slide.")
        continue
    slides.append(Slide(slide))


def send_keys(data):
    if state.simulate_type and data[0] != '^':
        for key in data:
            if key == ";":
                key = "\\;"
            if key == "\n":
                if interruptible_sleep(1): return
            subprocess.check_call(["tmux", "send-keys", key])
            if interruptible_sleep(0.15*random.random()): return
            if key == "\n":
                if interruptible_sleep(1): return
    else:
        subprocess.check_call(["tmux", "send-keys", data])


def capture_pane():
    return subprocess.check_output(["tmux", "capture-pane", "-p"]).decode('utf-8')


setup_tmux_and_ssh()


try:
    state.load()
    logging.info("Successfully loaded state from file.")
    # Let's override the starting state, so that when an error occurs,
    # we can restart the auto-tester and then single-step or debug.
    # (Instead of running again through the same issue immediately.)
    state.interactive = True
except Exception as e:
|
||||
logging.exception("Could not load state from file.")
|
||||
logging.warning("Using default values.")
|
||||
|
||||
def move_forward():
|
||||
state.snippet += 1
|
||||
if state.snippet > len(slides[state.slide].snippets):
|
||||
state.slide += 1
|
||||
state.snippet = 0
|
||||
check_bounds()
|
||||
|
||||
|
||||
def move_backward():
|
||||
state.snippet -= 1
|
||||
if state.snippet < 0:
|
||||
state.slide -= 1
|
||||
state.snippet = 0
|
||||
check_bounds()
|
||||
|
||||
|
||||
def check_bounds():
|
||||
if state.slide < 1:
|
||||
state.slide = 1
|
||||
if state.slide >= len(slides):
|
||||
state.slide = len(slides)-1
|
||||
|
||||
|
||||
while True:
|
||||
state.save()
|
||||
slide = slides[state.slide]
|
||||
snippet = slide.snippets[state.snippet-1] if state.snippet else None
|
||||
click.clear()
|
||||
print("[Slide {}/{}] [Snippet {}/{}] [simulate_type:{}] [verify_status:{}] "
|
||||
"[switch_desktop:{}] [sync_slides:{}] [open_links:{}] [run_hidden:{}]"
|
||||
.format(state.slide, len(slides)-1,
|
||||
state.snippet, len(slide.snippets) if slide.snippets else 0,
|
||||
state.simulate_type, state.verify_status,
|
||||
state.switch_desktop, state.sync_slides,
|
||||
state.open_links, state.run_hidden))
|
||||
print(hrule())
|
||||
if snippet:
|
||||
print(slide.content.replace(snippet.content, ansi(7)(snippet.content)))
|
||||
focus_terminal()
|
||||
else:
|
||||
print(slide.content)
|
||||
if state.sync_slides:
|
||||
subprocess.check_output(["./gotoslide.js", str(slide.number)])
|
||||
focus_slides()
|
||||
print(hrule())
|
||||
if state.interactive:
|
||||
print("y/⎵/⏎ Execute snippet or advance to next snippet")
|
||||
print("p/← Previous")
|
||||
print("n/→ Next")
|
||||
print("s Simulate keystrokes")
|
||||
print("v Validate exit status")
|
||||
print("d Switch desktop")
|
||||
print("k Sync slides")
|
||||
print("o Open links")
|
||||
print("h Run hidden commands")
|
||||
print("g Go to a specific slide")
|
||||
print("q Quit")
|
||||
print("c Continue non-interactively until next error")
|
||||
command = click.getchar()
|
||||
else:
|
||||
command = "y"
|
||||
|
||||
if command in ("n", "\x1b[C"):
|
||||
move_forward()
|
||||
elif command in ("p", "\x1b[D"):
|
||||
move_backward()
|
||||
elif command == "s":
|
||||
state.simulate_type = not state.simulate_type
|
||||
elif command == "v":
|
||||
state.verify_status = not state.verify_status
|
||||
elif command == "d":
|
||||
state.switch_desktop = not state.switch_desktop
|
||||
elif command == "k":
|
||||
state.sync_slides = not state.sync_slides
|
||||
elif command == "o":
|
||||
state.open_links = not state.open_links
|
||||
elif command == "h":
|
||||
state.run_hidden = not state.run_hidden
|
||||
elif command == "g":
|
||||
state.slide = click.prompt("Enter slide number", type=int)
|
||||
state.snippet = 0
|
||||
check_bounds()
|
||||
elif command == "q":
|
||||
break
|
||||
elif command == "c":
|
||||
# continue until next timeout
|
||||
state.interactive = False
|
||||
elif command in ("y", "\r", " "):
|
||||
if not snippet:
|
||||
# Advance to next snippet
|
||||
# Advance until a slide that has snippets
|
||||
while not slides[state.slide].snippets:
|
||||
move_forward()
|
||||
# But stop if we reach the last slide
|
||||
if state.slide == len(slides)-1:
|
||||
break
|
||||
# And then advance to the snippet
|
||||
move_forward()
|
||||
continue
|
||||
method, data = snippet.method, snippet.data
|
||||
logging.info("Running with method {}: {}".format(method, data))
|
||||
if method == "keys":
|
||||
send_keys(data)
|
||||
elif method == "bash" or (method == "hide" and state.run_hidden):
|
||||
# Make sure that we're ready
|
||||
wait_for_prompt()
|
||||
# Strip leading spaces
|
||||
data = re.sub("\n +", "\n", data)
|
||||
# Remove backticks (they are used to highlight sections)
|
||||
data = data.replace('`', '')
|
||||
# Add "RETURN" at the end of the command :)
|
||||
data += "\n"
|
||||
# Send command
|
||||
send_keys(data)
|
||||
# Force a short sleep to avoid race condition
|
||||
time.sleep(0.5)
|
||||
if snippet.next and snippet.next.method == "wait":
|
||||
wait_for_string(snippet.next.data)
|
||||
elif snippet.next and snippet.next.method == "longwait":
|
||||
wait_for_string(snippet.next.data, 10*TIMEOUT)
|
||||
else:
|
||||
wait_for_prompt()
|
||||
# Verify return code
|
||||
check_exit_status()
|
||||
elif method == "copypaste":
|
||||
screen = capture_pane()
|
||||
matches = re.findall(data, screen, flags=re.DOTALL)
|
||||
if len(matches) == 0:
|
||||
raise Exception("Could not find regex {} in output.".format(data))
|
||||
# Arbitrarily get the most recent match
|
||||
match = matches[-1]
|
||||
# Remove line breaks (like a screen copy paste would do)
|
||||
match = match.replace('\n', '')
|
||||
send_keys(match + '\n')
|
||||
# FIXME: we should factor out the "bash" method
|
||||
wait_for_prompt()
|
||||
check_exit_status()
|
||||
elif method == "open":
|
||||
# Cheap way to get node1's IP address
|
||||
screen = capture_pane()
|
||||
url = data.replace("/node1", "/{}".format(IPADDR))
|
||||
# This should probably be adapted to run on different OS
|
||||
if state.open_links:
|
||||
subprocess.check_output(["xdg-open", url])
|
||||
focus_browser()
|
||||
if state.interactive:
|
||||
print("Press any key to continue to next step...")
|
||||
click.getchar()
|
||||
else:
|
||||
logging.warning("Unknown method {}: {!r}".format(method, data))
|
||||
move_forward()
|
||||
|
||||
else:
|
||||
logging.warning("Unknown command {}.".format(command))
|
||||
slides/autopilot/gotoslide.js | 17 lines (new executable file)
@@ -0,0 +1,17 @@
#!/usr/bin/env node

/* Expects a slide number as first argument.
 * Will connect to the local pub/sub server,
 * and issue a "go to slide X" command, which
 * will be sent to all connected browsers.
 */

var io = require('socket.io-client');
var socket = io('http://localhost:3000');
socket.on('connect_error', function(){
    console.log('connection error');
    socket.close();
});
socket.emit('slide change', process.argv[2], function(){
    socket.close();
});
slides/autopilot/package-lock.json | 603 lines (generated, new file)
@@ -0,0 +1,603 @@
{
  "name": "container-training-pub-sub-server",
  "version": "0.0.1",
  "lockfileVersion": 1,
  "requires": true,
  "dependencies": {
    "accepts": {
      "version": "1.3.4",
      "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.4.tgz",
      "integrity": "sha1-hiRnWMfdbSGmR0/whKR0DsBesh8=",
      "requires": {
        "mime-types": "2.1.17",
        "negotiator": "0.6.1"
      }
    },
    "after": {
      "version": "0.8.2",
      "resolved": "https://registry.npmjs.org/after/-/after-0.8.2.tgz",
      "integrity": "sha1-/ts5T58OAqqXaOcCvaI7UF+ufh8="
    },
    "array-flatten": {
      "version": "1.1.1",
      "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
      "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI="
    },
    "arraybuffer.slice": {
      "version": "0.0.6",
      "resolved": "https://registry.npmjs.org/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz",
      "integrity": "sha1-8zshWfBTKj8xB6JywMz70a0peco="
    },
    "async-limiter": {
      "version": "1.0.0",
      "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.0.tgz",
      "integrity": "sha512-jp/uFnooOiO+L211eZOoSyzpOITMXx1rBITauYykG3BRYPu8h0UcxsPNB04RR5vo4Tyz3+ay17tR6JVf9qzYWg=="
    },
    "backo2": {
      "version": "1.0.2",
      "resolved": "https://registry.npmjs.org/backo2/-/backo2-1.0.2.tgz",
      "integrity": "sha1-MasayLEpNjRj41s+u2n038+6eUc="
    },
    "base64-arraybuffer": {
      "version": "0.1.5",
      "resolved": "https://registry.npmjs.org/base64-arraybuffer/-/base64-arraybuffer-0.1.5.tgz",
      "integrity": "sha1-c5JncZI7Whl0etZmqlzUv5xunOg="
    },
    "base64id": {
      "version": "1.0.0",
      "resolved": "https://registry.npmjs.org/base64id/-/base64id-1.0.0.tgz",
      "integrity": "sha1-R2iMuZu2gE8OBtPnY7HDLlfY5rY="
    },
    "better-assert": {
      "version": "1.0.2",
      "resolved": "https://registry.npmjs.org/better-assert/-/better-assert-1.0.2.tgz",
      "integrity": "sha1-QIZrnhueC1W0gYlDEeaPr/rrxSI=",
      "requires": {
        "callsite": "1.0.0"
      }
    },
    "blob": {
      "version": "0.0.4",
      "resolved": "https://registry.npmjs.org/blob/-/blob-0.0.4.tgz",
      "integrity": "sha1-vPEwUspURj8w+fx+lbmkdjCpSSE="
    },
    "body-parser": {
      "version": "1.18.2",
      "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.18.2.tgz",
      "integrity": "sha1-h2eKGdhLR9hZuDGZvVm84iKxBFQ=",
      "requires": {
        "bytes": "3.0.0",
        "content-type": "1.0.4",
        "debug": "2.6.9",
        "depd": "1.1.1",
        "http-errors": "1.6.2",
        "iconv-lite": "0.4.19",
        "on-finished": "2.3.0",
        "qs": "6.5.1",
        "raw-body": "2.3.2",
        "type-is": "1.6.15"
      }
    },
    "bytes": {
      "version": "3.0.0",
      "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
      "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg="
    },
    "callsite": {
      "version": "1.0.0",
      "resolved": "https://registry.npmjs.org/callsite/-/callsite-1.0.0.tgz",
      "integrity": "sha1-KAOY5dZkvXQDi28JBRU+borxvCA="
    },
    "component-bind": {
      "version": "1.0.0",
      "resolved": "https://registry.npmjs.org/component-bind/-/component-bind-1.0.0.tgz",
      "integrity": "sha1-AMYIq33Nk4l8AAllGx06jh5zu9E="
    },
    "component-emitter": {
      "version": "1.2.1",
      "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz",
      "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY="
    },
    "component-inherit": {
      "version": "0.0.3",
      "resolved": "https://registry.npmjs.org/component-inherit/-/component-inherit-0.0.3.tgz",
      "integrity": "sha1-ZF/ErfWLcrZJ1crmUTVhnbJv8UM="
    },
    "content-disposition": {
      "version": "0.5.2",
      "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz",
      "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ="
    },
    "content-type": {
      "version": "1.0.4",
      "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
      "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA=="
    },
    "cookie": {
      "version": "0.3.1",
      "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.3.1.tgz",
      "integrity": "sha1-5+Ch+e9DtMi6klxcWpboBtFoc7s="
    },
    "cookie-signature": {
      "version": "1.0.6",
      "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
      "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw="
    },
    "debug": {
      "version": "2.6.9",
      "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
      "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
      "requires": {
        "ms": "2.0.0"
      }
    },
    "depd": {
      "version": "1.1.1",
      "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.1.tgz",
      "integrity": "sha1-V4O04cRZ8G+lyif5kfPQbnoxA1k="
    },
    "destroy": {
      "version": "1.0.4",
      "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
      "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA="
    },
    "ee-first": {
      "version": "1.1.1",
      "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
      "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0="
    },
    "encodeurl": {
      "version": "1.0.1",
      "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.1.tgz",
      "integrity": "sha1-eePVhlU0aQn+bw9Fpd5oEDspTSA="
    },
    "engine.io": {
      "version": "3.1.4",
      "resolved": "https://registry.npmjs.org/engine.io/-/engine.io-3.1.4.tgz",
      "integrity": "sha1-PQIRtwpVLOhB/8fahiezAamkFi4=",
      "requires": {
        "accepts": "1.3.3",
        "base64id": "1.0.0",
        "cookie": "0.3.1",
        "debug": "2.6.9",
        "engine.io-parser": "2.1.1",
        "uws": "0.14.5",
        "ws": "3.3.3"
      },
      "dependencies": {
        "accepts": {
          "version": "1.3.3",
          "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.3.tgz",
          "integrity": "sha1-w8p0NJOGSMPg2cHjKN1otiLChMo=",
          "requires": {
            "mime-types": "2.1.17",
            "negotiator": "0.6.1"
          }
        }
      }
    },
    "engine.io-client": {
      "version": "3.1.4",
      "resolved": "https://registry.npmjs.org/engine.io-client/-/engine.io-client-3.1.4.tgz",
      "integrity": "sha1-T88TcLRxY70s6b4nM5ckMDUNTqE=",
      "requires": {
        "component-emitter": "1.2.1",
        "component-inherit": "0.0.3",
        "debug": "2.6.9",
        "engine.io-parser": "2.1.1",
        "has-cors": "1.1.0",
        "indexof": "0.0.1",
        "parseqs": "0.0.5",
        "parseuri": "0.0.5",
        "ws": "3.3.3",
        "xmlhttprequest-ssl": "1.5.4",
        "yeast": "0.1.2"
      }
    },
    "engine.io-parser": {
      "version": "2.1.1",
      "resolved": "https://registry.npmjs.org/engine.io-parser/-/engine.io-parser-2.1.1.tgz",
      "integrity": "sha1-4Ps/DgRi9/WLt3waUun1p+JuRmg=",
      "requires": {
        "after": "0.8.2",
        "arraybuffer.slice": "0.0.6",
        "base64-arraybuffer": "0.1.5",
        "blob": "0.0.4",
        "has-binary2": "1.0.2"
      }
    },
    "escape-html": {
      "version": "1.0.3",
      "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
      "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg="
    },
    "etag": {
      "version": "1.8.1",
      "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
      "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc="
    },
    "express": {
      "version": "4.16.2",
      "resolved": "https://registry.npmjs.org/express/-/express-4.16.2.tgz",
      "integrity": "sha1-41xt/i1kt9ygpc1PIXgb4ymeB2w=",
      "requires": {
        "accepts": "1.3.4",
        "array-flatten": "1.1.1",
        "body-parser": "1.18.2",
        "content-disposition": "0.5.2",
        "content-type": "1.0.4",
        "cookie": "0.3.1",
        "cookie-signature": "1.0.6",
        "debug": "2.6.9",
        "depd": "1.1.1",
        "encodeurl": "1.0.1",
        "escape-html": "1.0.3",
        "etag": "1.8.1",
        "finalhandler": "1.1.0",
        "fresh": "0.5.2",
        "merge-descriptors": "1.0.1",
        "methods": "1.1.2",
        "on-finished": "2.3.0",
        "parseurl": "1.3.2",
        "path-to-regexp": "0.1.7",
        "proxy-addr": "2.0.2",
        "qs": "6.5.1",
        "range-parser": "1.2.0",
        "safe-buffer": "5.1.1",
        "send": "0.16.1",
        "serve-static": "1.13.1",
        "setprototypeof": "1.1.0",
        "statuses": "1.3.1",
        "type-is": "1.6.15",
        "utils-merge": "1.0.1",
        "vary": "1.1.2"
      }
    },
    "finalhandler": {
      "version": "1.1.0",
      "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.0.tgz",
      "integrity": "sha1-zgtoVbRYU+eRsvzGgARtiCU91/U=",
      "requires": {
        "debug": "2.6.9",
        "encodeurl": "1.0.1",
        "escape-html": "1.0.3",
        "on-finished": "2.3.0",
        "parseurl": "1.3.2",
        "statuses": "1.3.1",
        "unpipe": "1.0.0"
      }
    },
    "forwarded": {
      "version": "0.1.2",
      "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz",
      "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ="
    },
    "fresh": {
      "version": "0.5.2",
      "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
      "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac="
    },
    "has-binary2": {
      "version": "1.0.2",
      "resolved": "https://registry.npmjs.org/has-binary2/-/has-binary2-1.0.2.tgz",
      "integrity": "sha1-6D26SfC5vk0CbSc2U1DZ8D9Uvpg=",
      "requires": {
        "isarray": "2.0.1"
      }
    },
    "has-cors": {
      "version": "1.1.0",
      "resolved": "https://registry.npmjs.org/has-cors/-/has-cors-1.1.0.tgz",
      "integrity": "sha1-XkdHk/fqmEPRu5nCPu9J/xJv/zk="
    },
    "http-errors": {
      "version": "1.6.2",
      "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.2.tgz",
      "integrity": "sha1-CgAsyFcHGSp+eUbO7cERVfYOxzY=",
      "requires": {
        "depd": "1.1.1",
        "inherits": "2.0.3",
        "setprototypeof": "1.0.3",
        "statuses": "1.3.1"
      },
      "dependencies": {
        "setprototypeof": {
          "version": "1.0.3",
          "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.0.3.tgz",
          "integrity": "sha1-ZlZ+NwQ+608E2RvWWMDL77VbjgQ="
        }
      }
    },
    "iconv-lite": {
      "version": "0.4.19",
      "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.19.tgz",
      "integrity": "sha512-oTZqweIP51xaGPI4uPa56/Pri/480R+mo7SeU+YETByQNhDG55ycFyNLIgta9vXhILrxXDmF7ZGhqZIcuN0gJQ=="
    },
    "indexof": {
      "version": "0.0.1",
      "resolved": "https://registry.npmjs.org/indexof/-/indexof-0.0.1.tgz",
      "integrity": "sha1-gtwzbSMrkGIXnQWrMpOmYFn9Q10="
    },
    "inherits": {
      "version": "2.0.3",
      "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
      "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4="
    },
    "ipaddr.js": {
      "version": "1.5.2",
      "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.5.2.tgz",
      "integrity": "sha1-1LUFvemUaYfM8PxY2QEP+WB+P6A="
    },
    "isarray": {
      "version": "2.0.1",
      "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.1.tgz",
      "integrity": "sha1-o32U7ZzaLVmGXJ92/llu4fM4dB4="
    },
    "media-typer": {
      "version": "0.3.0",
      "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
      "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g="
    },
    "merge-descriptors": {
      "version": "1.0.1",
      "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz",
      "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E="
    },
    "methods": {
      "version": "1.1.2",
      "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
      "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4="
    },
    "mime": {
      "version": "1.4.1",
      "resolved": "https://registry.npmjs.org/mime/-/mime-1.4.1.tgz",
      "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ=="
    },
    "mime-db": {
      "version": "1.30.0",
      "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.30.0.tgz",
      "integrity": "sha1-dMZD2i3Z1qRTmZY0ZbJtXKfXHwE="
    },
    "mime-types": {
      "version": "2.1.17",
      "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.17.tgz",
      "integrity": "sha1-Cdejk/A+mVp5+K+Fe3Cp4KsWVXo=",
      "requires": {
        "mime-db": "1.30.0"
      }
    },
    "ms": {
      "version": "2.0.0",
      "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
      "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
    },
    "negotiator": {
      "version": "0.6.1",
      "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.1.tgz",
      "integrity": "sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk="
    },
    "object-component": {
      "version": "0.0.3",
      "resolved": "https://registry.npmjs.org/object-component/-/object-component-0.0.3.tgz",
      "integrity": "sha1-8MaapQ78lbhmwYb0AKM3acsvEpE="
    },
    "on-finished": {
      "version": "2.3.0",
      "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
      "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
      "requires": {
        "ee-first": "1.1.1"
      }
    },
    "parseqs": {
      "version": "0.0.5",
      "resolved": "https://registry.npmjs.org/parseqs/-/parseqs-0.0.5.tgz",
      "integrity": "sha1-1SCKNzjkZ2bikbouoXNoSSGouJ0=",
      "requires": {
        "better-assert": "1.0.2"
      }
    },
    "parseuri": {
      "version": "0.0.5",
      "resolved": "https://registry.npmjs.org/parseuri/-/parseuri-0.0.5.tgz",
      "integrity": "sha1-gCBKUNTbt3m/3G6+J3jZDkvOMgo=",
      "requires": {
        "better-assert": "1.0.2"
      }
    },
    "parseurl": {
      "version": "1.3.2",
      "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.2.tgz",
      "integrity": "sha1-/CidTtiZMRlGDBViUyYs3I3mW/M="
    },
    "path-to-regexp": {
      "version": "0.1.7",
      "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz",
      "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w="
    },
    "proxy-addr": {
      "version": "2.0.2",
      "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.2.tgz",
      "integrity": "sha1-ZXFQT0e7mI7IGAJT+F3X4UlSvew=",
      "requires": {
        "forwarded": "0.1.2",
        "ipaddr.js": "1.5.2"
      }
    },
    "qs": {
      "version": "6.5.1",
      "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.1.tgz",
      "integrity": "sha512-eRzhrN1WSINYCDCbrz796z37LOe3m5tmW7RQf6oBntukAG1nmovJvhnwHHRMAfeoItc1m2Hk02WER2aQ/iqs+A=="
    },
    "range-parser": {
      "version": "1.2.0",
      "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz",
      "integrity": "sha1-9JvmtIeJTdxA3MlKMi9hEJLgDV4="
    },
    "raw-body": {
      "version": "2.3.2",
      "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.2.tgz",
      "integrity": "sha1-vNYMd9Prk83gBQKVw/N5OJvIj4k=",
      "requires": {
        "bytes": "3.0.0",
        "http-errors": "1.6.2",
        "iconv-lite": "0.4.19",
        "unpipe": "1.0.0"
      }
    },
    "safe-buffer": {
      "version": "5.1.1",
      "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.1.tgz",
      "integrity": "sha512-kKvNJn6Mm93gAczWVJg7wH+wGYWNrDHdWvpUmHyEsgCtIwwo3bqPtV4tR5tuPaUhTOo/kvhVwd8XwwOllGYkbg=="
    },
    "send": {
      "version": "0.16.1",
      "resolved": "https://registry.npmjs.org/send/-/send-0.16.1.tgz",
      "integrity": "sha512-ElCLJdJIKPk6ux/Hocwhk7NFHpI3pVm/IZOYWqUmoxcgeyM+MpxHHKhb8QmlJDX1pU6WrgaHBkVNm73Sv7uc2A==",
      "requires": {
        "debug": "2.6.9",
        "depd": "1.1.1",
        "destroy": "1.0.4",
        "encodeurl": "1.0.1",
        "escape-html": "1.0.3",
        "etag": "1.8.1",
        "fresh": "0.5.2",
        "http-errors": "1.6.2",
        "mime": "1.4.1",
        "ms": "2.0.0",
        "on-finished": "2.3.0",
        "range-parser": "1.2.0",
        "statuses": "1.3.1"
      }
    },
    "serve-static": {
      "version": "1.13.1",
      "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.13.1.tgz",
      "integrity": "sha512-hSMUZrsPa/I09VYFJwa627JJkNs0NrfL1Uzuup+GqHfToR2KcsXFymXSV90hoyw3M+msjFuQly+YzIH/q0MGlQ==",
      "requires": {
        "encodeurl": "1.0.1",
        "escape-html": "1.0.3",
        "parseurl": "1.3.2",
        "send": "0.16.1"
      }
    },
    "setprototypeof": {
      "version": "1.1.0",
      "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
      "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ=="
    },
    "socket.io": {
      "version": "2.0.4",
      "resolved": "https://registry.npmjs.org/socket.io/-/socket.io-2.0.4.tgz",
      "integrity": "sha1-waRZDO/4fs8TxyZS8Eb3FrKeYBQ=",
      "requires": {
        "debug": "2.6.9",
        "engine.io": "3.1.4",
        "socket.io-adapter": "1.1.1",
        "socket.io-client": "2.0.4",
        "socket.io-parser": "3.1.2"
      }
    },
    "socket.io-adapter": {
      "version": "1.1.1",
      "resolved": "https://registry.npmjs.org/socket.io-adapter/-/socket.io-adapter-1.1.1.tgz",
      "integrity": "sha1-KoBeihTWNyEk3ZFZrUUC+MsH8Gs="
    },
    "socket.io-client": {
      "version": "2.0.4",
      "resolved": "https://registry.npmjs.org/socket.io-client/-/socket.io-client-2.0.4.tgz",
      "integrity": "sha1-CRilUkBtxeVAs4Dc2Xr8SmQzL44=",
      "requires": {
        "backo2": "1.0.2",
        "base64-arraybuffer": "0.1.5",
        "component-bind": "1.0.0",
        "component-emitter": "1.2.1",
        "debug": "2.6.9",
        "engine.io-client": "3.1.4",
        "has-cors": "1.1.0",
        "indexof": "0.0.1",
        "object-component": "0.0.3",
        "parseqs": "0.0.5",
        "parseuri": "0.0.5",
        "socket.io-parser": "3.1.2",
        "to-array": "0.1.4"
      }
    },
    "socket.io-parser": {
      "version": "3.1.2",
      "resolved": "https://registry.npmjs.org/socket.io-parser/-/socket.io-parser-3.1.2.tgz",
      "integrity": "sha1-28IoIVH8T6675Aru3Ady66YZ9/I=",
      "requires": {
        "component-emitter": "1.2.1",
        "debug": "2.6.9",
        "has-binary2": "1.0.2",
        "isarray": "2.0.1"
      }
    },
    "statuses": {
      "version": "1.3.1",
      "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.3.1.tgz",
      "integrity": "sha1-+vUbnrdKrvOzrPStX2Gr8ky3uT4="
    },
    "to-array": {
      "version": "0.1.4",
      "resolved": "https://registry.npmjs.org/to-array/-/to-array-0.1.4.tgz",
      "integrity": "sha1-F+bBH3PdTz10zaek/zI46a2b+JA="
    },
    "type-is": {
      "version": "1.6.15",
      "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.15.tgz",
      "integrity": "sha1-yrEPtJCeRByChC6v4a1kbIGARBA=",
      "requires": {
        "media-typer": "0.3.0",
        "mime-types": "2.1.17"
      }
    },
    "ultron": {
      "version": "1.1.1",
      "resolved": "https://registry.npmjs.org/ultron/-/ultron-1.1.1.tgz",
      "integrity": "sha512-UIEXBNeYmKptWH6z8ZnqTeS8fV74zG0/eRU9VGkpzz+LIJNs8W/zM/L+7ctCkRrgbNnnR0xxw4bKOr0cW0N0Og=="
    },
    "unpipe": {
      "version": "1.0.0",
      "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
      "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw="
    },
    "utils-merge": {
      "version": "1.0.1",
      "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
      "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM="
    },
    "uws": {
      "version": "0.14.5",
      "resolved": "https://registry.npmjs.org/uws/-/uws-0.14.5.tgz",
      "integrity": "sha1-Z6rzPEaypYel9mZtAPdpEyjxSdw=",
      "optional": true
    },
    "vary": {
      "version": "1.1.2",
      "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
      "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw="
    },
    "ws": {
      "version": "3.3.3",
      "resolved": "https://registry.npmjs.org/ws/-/ws-3.3.3.tgz",
      "integrity": "sha512-nnWLa/NwZSt4KQJu51MYlCcSQ5g7INpOrOMt4XV8j4dqTXdmlUmSHQ8/oLC069ckre0fRsgfvsKwbTdtKLCDkA==",
      "requires": {
        "async-limiter": "1.0.0",
        "safe-buffer": "5.1.1",
        "ultron": "1.1.1"
      }
    },
    "xmlhttprequest-ssl": {
      "version": "1.5.4",
      "resolved": "https://registry.npmjs.org/xmlhttprequest-ssl/-/xmlhttprequest-ssl-1.5.4.tgz",
      "integrity": "sha1-BPVgkVcks4kIhxXMDteBPpZ3v1c="
    },
    "yeast": {
      "version": "0.1.2",
      "resolved": "https://registry.npmjs.org/yeast/-/yeast-0.1.2.tgz",
      "integrity": "sha1-AI4G2AlDIMNy28L47XagymyKxBk="
    }
  }
}
slides/autopilot/package.json | 8 lines (new file)
@@ -0,0 +1,8 @@
{
  "name": "container-training-pub-sub-server",
  "version": "0.0.1",
  "dependencies": {
    "express": "^4.16.2",
    "socket.io": "^2.0.4"
  }
}
slides/autopilot/remote.js | 21 lines (new file)
@@ -0,0 +1,21 @@
/* This snippet is loaded from the workshop HTML file.
 * It sets up callbacks to synchronize the local slide
 * number with the remote pub/sub server.
 */

var socket = io();
var leader = true;

slideshow.on('showSlide', function (slide) {
    if (leader) {
        var n = slide.getSlideIndex()+1;
        socket.emit('slide change', n);
    }
});

socket.on('slide change', function (n) {
    leader = false;
    slideshow.gotoSlide(n);
    leader = true;
});
slides/autopilot/requirements.txt | 1 line (new file)
@@ -0,0 +1 @@
click
slides/autopilot/server.js | 41 lines (new executable file)
@@ -0,0 +1,41 @@
#!/usr/bin/env node

/* This is a very simple pub/sub server, allowing to
 * remote control browsers displaying the slides.
 * The browsers connect to this pub/sub server using
 * Socket.IO, and the server tells them which slides
 * to display.
 *
 * The server can be controlled with a little CLI,
 * or by one of the browsers.
 */

var express = require('express');
var app = express();
var http = require('http').Server(app);
var io = require('socket.io')(http);

app.get('/', function(req, res){
  res.send('container.training autopilot pub/sub server');
});

/* Serve remote.js from the current directory */
app.use(express.static('.'));

/* Serve slides etc. from current and the parent directory */
app.use(express.static('..'));

io.on('connection', function(socket){
  console.log('a client connected: ' + socket.handshake.address);
  socket.on('slide change', function(n, ack){
    console.log('slide change: ' + n);
    socket.broadcast.emit('slide change', n);
    if (typeof ack === 'function') {
      ack();
    }
  });
});

http.listen(3000, function(){
  console.log('listening on *:3000');
});
slides/autopilot/tmux-style.sh | 7 lines (new executable file)
@@ -0,0 +1,7 @@
#!/bin/sh
# This removes the clock (and other extraneous stuff) from the
# tmux status bar, and it gives it a non-default color.
tmux set-option -g status-left ""
tmux set-option -g status-right ""
tmux set-option -g status-style bg=cyan
@@ -1,15 +1,29 @@
#!/bin/sh
set -e
case "$1" in
once)
    ./index.py
    for YAML in *.yml; do
-       ./markmaker.py < $YAML > $YAML.html || {
+       ./markmaker.py $YAML > $YAML.html || {
            rm $YAML.html
            break
        }
    done
    if [ -n "$SLIDECHECKER" ]; then
        for YAML in *.yml; do
            ./appendcheck.py $YAML.html
        done
    fi
    ;;

forever)
    set +e
    # check if entr is installed
    if ! command -v entr >/dev/null; then
        echo >&2 "First install 'entr' with apt, brew, etc."
        exit
    fi

    # There is a weird bug in entr, at least on MacOS,
    # where it doesn't restore the terminal to a clean
    # state when exitting. So let's try to work around
@@ -1,477 +0,0 @@
# Our sample application

- Visit the GitHub repository with all the materials of this workshop:
  <br/>https://github.com/jpetazzo/orchestration-workshop

- The application is in the [dockercoins](
  https://github.com/jpetazzo/orchestration-workshop/tree/master/dockercoins)
  subdirectory

- Let's look at the general layout of the source code:

  there is a Compose file [docker-compose.yml](
  https://github.com/jpetazzo/orchestration-workshop/blob/master/dockercoins/docker-compose.yml) ...

  ... and 4 other services, each in its own directory:

  - `rng` = web service generating random bytes
  - `hasher` = web service computing hash of POSTed data
  - `worker` = background process using `rng` and `hasher`
  - `webui` = web interface to watch progress

---

class: extra-details

## Compose file format version

*Particularly relevant if you have used Compose before...*

- Compose 1.6 introduced support for a new Compose file format (aka "v2")

- Services are no longer at the top level, but under a `services` section

- There has to be a `version` key at the top level, with value `"2"` (as a string, not an integer)

- Containers are placed on a dedicated network, making links unnecessary

- There are other minor differences, but upgrade is easy and straightforward (see the minimal example below)
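Here is a minimal sketch of such a "v2" file; the service names and images are made-up examples, not part of the workshop app:

```yaml
version: "2"

services:
  web:
    image: nginx
    ports:
      - "8000:80"
  cache:
    image: redis
```

Note the quoted `"2"` and the services nested under `services`; both containers land on the app's dedicated network, so `web` can reach `cache` by name.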
---

## Links, naming, and service discovery

- Containers can have network aliases (resolvable through DNS)

- Compose file version 2+ makes each container reachable through its service name

- Compose file version 1 did require "links" sections

- Our code can connect to services using their short name (a quick check is shown below)

  (instead of e.g. IP address or FQDN)

- Network aliases are automatically namespaced

  (i.e. you can have multiple apps declaring and using a service named `database`)
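As a quick, optional sanity check (assuming the app is up, and that the image ships a `ping` binary), name resolution can be observed from inside one of the app's containers:

```bash
docker-compose exec worker ping -c 1 rng
```

The name `rng` resolves to that container's address on the app's dedicated network.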
---

## Example in `worker/worker.py`

(image: excerpt of `worker/worker.py`)

---

## What's this application?

---

class: pic

(image: DockerCoins logo)

(DockerCoins 2016 logo courtesy of [@XtlCnslt](https://twitter.com/xtlcnslt) and [@ndeloof](https://twitter.com/ndeloof). Thanks!)

---

## What's this application?

- It is a DockerCoin miner! 💰🐳📦🚢

--

- No, you can't buy coffee with DockerCoins

--

- How DockerCoins works (sketched in pseudocode below):

  - `worker` asks `rng` to generate a few random bytes

  - `worker` feeds these bytes into `hasher`

  - and repeat forever!

  - every second, `worker` updates `redis` to indicate how many loops were done

  - `webui` queries `redis`, and computes and exposes "hashing speed" in your browser
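In Python-like pseudocode, the loop looks roughly like this (the URLs, key name, and client setup are illustrative, not the actual source):

```python
import time

import redis     # assuming the redis-py client
import requests  # assuming plain HTTP calls to rng and hasher

db = redis.Redis(host="redis")
done = 0
deadline = time.time() + 1

while True:
    # Ask rng for a few random bytes, and feed them to hasher
    random_bytes = requests.get("http://rng/32").content
    requests.post("http://hasher/", data=random_bytes)
    done += 1
    # Push the loop count to redis about once per second
    if time.time() > deadline:
        db.incrby("hashes", done)
        done = 0
        deadline = time.time() + 1
```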
---

## Getting the application source code

- We will clone the GitHub repository

- The repository also contains scripts and tools that we will use through the workshop

.exercise[

<!--
```bash
if [ -d orchestration-workshop ]; then
  mv orchestration-workshop orchestration-workshop.$$
fi
```
-->

- Clone the repository on `node1`:
  ```bash
  git clone git://github.com/jpetazzo/orchestration-workshop
  ```

]

(You can also fork the repository on GitHub and clone your fork if you prefer that.)

---

# Running the application

Without further ado, let's start our application.

.exercise[

- Go to the `dockercoins` directory, in the cloned repo:
  ```bash
  cd ~/orchestration-workshop/dockercoins
  ```

- Use Compose to build and run all containers:
  ```bash
  docker-compose up
  ```

<!--
```wait units of work done```
```keys ^C```
-->

]

Compose tells Docker to build all container images (pulling
the corresponding base images), then starts all containers,
and displays aggregated logs.

---

## Lots of logs

- The application continuously generates logs

- We can see the `worker` service making requests to `rng` and `hasher`

- Let's put that in the background

.exercise[

- Stop the application by hitting `^C`

]

- `^C` stops all containers by sending them the `TERM` signal

- Some containers exit immediately, others take longer
  <br/>(because they don't handle `SIGTERM` and end up being killed after a 10s timeout; see the note below)
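As an aside: when the app runs in the background, `docker-compose stop` accepts a shorter grace period than the default 10 seconds; a quick sketch (any small value works):

```bash
docker-compose stop -t 3
```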
---

## Restarting in the background

- Many flags and commands of Compose are modeled after those of `docker`

.exercise[

- Start the app in the background with the `-d` option:
  ```bash
  docker-compose up -d
  ```

- Check that our app is running with the `ps` command:
  ```bash
  docker-compose ps
  ```

]

`docker-compose ps` also shows the ports exposed by the application.

---

class: extra-details

## Viewing logs

- The `docker-compose logs` command works like `docker logs`

.exercise[

- View all logs since container creation and exit when done:
  ```bash
  docker-compose logs
  ```

- Stream container logs, starting at the last 10 lines for each container:
  ```bash
  docker-compose logs --tail 10 --follow
  ```

<!--
```wait units of work done```
```keys ^C```
-->

]

Tip: use `^S` and `^Q` to pause/resume log output.

---

class: extra-details

## Upgrading from Compose 1.6

.warning[The `logs` command has changed between Compose 1.6 and 1.7!]

- Up to 1.6

  - `docker-compose logs` is the equivalent of `logs --follow`

  - `docker-compose logs` must be restarted if containers are added

- Since 1.7

  - `--follow` must be specified explicitly

  - new containers are automatically picked up by `docker-compose logs`

---

## Connecting to the web UI

- The `webui` container exposes a web dashboard; let's view it

.exercise[

- With a web browser, connect to `node1` on port 8000

- Remember: the `nodeX` aliases are valid only on the nodes themselves

- In your browser, you need to enter the IP address of your node

<!-- ```open http://node1:8000``` -->

]

You should see a speed of approximately 4 hashes/second.

More precisely: 4 hashes/second, with regular dips down to zero.
<br/>This is because Jérôme is incapable of writing good frontend code.
<br/>Don't ask. Seriously, don't ask. This is embarrassing.

---

class: extra-details

## Why does the speed seem irregular?

- The app actually has a constant, steady speed: 3.33 hashes/second
  <br/>
  (which corresponds to 1 hash every 0.3 seconds, for *reasons*)

- The worker doesn't update the counter after every loop, but up to once per second

- The speed is computed by the browser, checking the counter about once per second

- Between two consecutive updates, the counter will increase either by 4, or by 0

- The perceived speed will therefore be 4 - 4 - 4 - 0 - 4 - 4 - etc.

*We told you to not ask!!!*

---

## Scaling up the application

- Our goal is to make that performance graph go up (without changing a line of code!)

--

- Before trying to scale the application, we'll figure out if we need more resources

  (CPU, RAM...)

- For that, we will use good old UNIX tools on our Docker node

---

## Looking at resource usage

- Let's look at CPU, memory, and I/O usage

.exercise[

- run `top` to see CPU and memory usage (you should see idle cycles)

<!--
```bash top```

```wait Tasks```
```keys ^C```
-->

- run `vmstat 1` to see I/O usage (si/so/bi/bo)
  <br/>(the 4 numbers should be almost zero, except `bo` for logging)

<!--
```bash vmstat 1```

```wait memory```
```keys ^C```
-->

]

We have available resources.

- Why?
- How can we use them?

---

## Scaling workers on a single node

- Docker Compose supports scaling
- Let's scale `worker` and see what happens!

.exercise[

- Start one more `worker` container:
  ```bash
  docker-compose scale worker=2
  ```

- Look at the performance graph (it should show a x2 improvement)

- Look at the aggregated logs of our containers (`worker_2` should show up)

- Look at the impact on CPU load with e.g. top (it should be negligible)

]

---

## Adding more workers

- Great, let's add more workers and call it a day, then!

.exercise[

- Start eight more `worker` containers:
  ```bash
  docker-compose scale worker=10
  ```

- Look at the performance graph: does it show a x10 improvement?

- Look at the aggregated logs of our containers

- Look at the impact on CPU load and memory usage

]

---

# Identifying bottlenecks

- You should have seen a 3x speed bump (not 10x)

- Adding workers didn't result in linear improvement

- *Something else* is slowing us down

--

- ... But what?

--

- The code doesn't have instrumentation

- Let's use state-of-the-art HTTP performance analysis!
  <br/>(i.e. good old tools like `ab`, `httping`...)

---

## Accessing internal services

- `rng` and `hasher` are exposed on ports 8001 and 8002

- This is declared in the Compose file:

  ```yaml
  ...
  rng:
    build: rng
    ports:
      - "8001:80"

  hasher:
    build: hasher
    ports:
      - "8002:80"
  ...
  ```

---

## Measuring latency under load

We will use `httping`.

.exercise[

- Check the latency of `rng`:
  ```bash
  httping -c 10 localhost:8001
  ```

- Check the latency of `hasher`:
  ```bash
  httping -c 10 localhost:8002
  ```

]

`rng` has a much higher latency than `hasher`.

---

## Let's draw hasty conclusions

- The bottleneck seems to be `rng`

- *What if* we don't have enough entropy and can't generate enough random numbers?

- We need to scale out the `rng` service on multiple machines!

Note: this is a fiction! We have enough entropy. But we need a pretext to scale out.

(In fact, the code of `rng` uses `/dev/urandom`, which never runs out of entropy...
<br/>
...and is [just as good as `/dev/random`](http://www.slideshare.net/PacSecJP/filippo-plain-simple-reality-of-entropy).)

---

## Clean up

- Before moving on, let's remove those containers

.exercise[

- Tell Compose to remove everything:
  ```bash
  docker-compose down
  ```

]
@@ -1,9 +1,6 @@

class: title

# Advanced Dockerfiles

-(image)
+(image)

---

@@ -37,18 +34,6 @@ In this section, we will see more Dockerfile commands.

---

## The `MAINTAINER` instruction

The `MAINTAINER` instruction tells you who wrote the `Dockerfile`.

```dockerfile
MAINTAINER Docker Education Team <education@docker.com>
```

It's optional but recommended.

---

## The `RUN` instruction

The `RUN` instruction can be specified in two ways.

@@ -97,8 +82,6 @@ RUN apt-get update && apt-get install -y wget && apt-get clean

-It is also possible to break a command onto multiple lines:
+It is possible to execute multiple commands in a single step:

```dockerfile
RUN apt-get update \
 && apt-get install -y wget \

@@ -372,7 +355,7 @@ class: extra-details

## Overriding the `ENTRYPOINT` instruction

-The entry point can be overriden as well.
+The entry point can be overridden as well.

```bash
$ docker run -it training/ls

@@ -433,5 +416,4 @@ ONBUILD COPY . /src

```

* You can't chain `ONBUILD` instructions with `ONBUILD`.
-* `ONBUILD` can't be used to trigger `FROM` and `MAINTAINER`
-  instructions.
+* `ONBUILD` can't be used to trigger `FROM` instructions.
@@ -3,7 +3,7 @@ class: title

# Ambassadors

-(image)
+(image)

---

@@ -40,7 +40,9 @@ ambassador containers.

---

-(image)
+class: pic
+
+(image)

---
slides/containers/Application_Configuration.md | 201 lines (new file)
@@ -0,0 +1,201 @@
# Application Configuration

There are many ways to provide configuration to containerized applications.

There is no "best way" — it depends on factors like:

* configuration size,

* mandatory and optional parameters,

* scope of configuration (per container, per app, per customer, per site, etc.),

* frequency of changes in the configuration.

---

## Command-line parameters

```bash
docker run jpetazzo/hamba 80 www1:80 www2:80
```

* Configuration is provided through command-line parameters.

* In the above example, the `ENTRYPOINT` is a script that will:

  - parse the parameters,

  - generate a configuration file,

  - start the actual service.

---

## Command-line parameters pros and cons

* Appropriate for mandatory parameters (without which the service cannot start).

* Convenient for "toolbelt" services instantiated many times.

  (Because there is no extra step: just run it!)

* Not great for dynamic configurations or bigger configurations.

  (These things are still possible, but more cumbersome.)

---

## Environment variables

```bash
docker run -e ELASTICSEARCH_URL=http://es42:9201/ kibana
```

* Configuration is provided through environment variables.

* The environment variable can be used straight by the program,
  <br/>or by a script generating a configuration file (a small sketch of such a script follows).
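For instance, a minimal entrypoint script could template the configuration from the environment before handing off to the service (the variable, template, and service names here are made up for illustration):

```bash
#!/bin/sh
# Substitute the URL into a config template, falling back to a default value.
sed "s|@URL@|${ELASTICSEARCH_URL:-http://localhost:9200/}|" \
    /etc/app.conf.template > /etc/app.conf
# Then hand over control to the actual service.
exec myservice -c /etc/app.conf
```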
---

## Environment variables pros and cons

* Appropriate for optional parameters (since the image can provide default values).

* Also convenient for services instantiated many times.

  (It's as easy as command-line parameters.)

* Great for services with lots of parameters when you only want to specify a few.

  (And use default values for everything else.)

* Ability to introspect possible parameters and their default values.

* Not great for dynamic configurations.

---

## Baked-in configuration

```
FROM prometheus
COPY prometheus.conf /etc
```

* The configuration is added to the image.

* The image may have a default configuration; the new configuration can:

  - replace the default configuration,

  - extend it (if the code can read multiple configuration files).

---

## Baked-in configuration pros and cons

* Allows arbitrary customization and complex configuration files.

* Requires writing a configuration file. (Obviously!)

* Requires building an image to start the service.

* Requires rebuilding the image to reconfigure the service.

* Requires rebuilding the image to upgrade the service.

* Configured images can be stored in registries.

  (Which is great, but requires a registry.)

---

## Configuration volume

```bash
docker run -v appconfig:/etc/appconfig myapp
```

* The configuration is stored in a volume.

* The volume is attached to the container.

* The image may have a default configuration.

  (But this results in a less "obvious" setup that needs more documentation.)

---

## Configuration volume pros and cons

* Allows arbitrary customization and complex configuration files.

* Requires creating a volume for each different configuration.

* Services with identical configurations can use the same volume.

* Doesn't require building / rebuilding an image when upgrading / reconfiguring.

* Configuration can be generated or edited through another container.

---

## Dynamic configuration volume

* This is a powerful pattern for dynamic, complex configurations.

* The configuration is stored in a volume.

* The configuration is generated / updated by a special container.

* The application container detects when the configuration is changed.

  (And automatically reloads the configuration when necessary.)

* The configuration can be shared between multiple services if needed.

---

## Dynamic configuration volume example

In a first terminal, start a load balancer with an initial configuration:

```bash
$ docker run --name loadbalancer jpetazzo/hamba \
    80 goo.gl:80
```

In another terminal, reconfigure that load balancer:

```bash
$ docker run --rm --volumes-from loadbalancer jpetazzo/hamba reconfigure \
    80 google.com:80
```

The configuration could also be updated through e.g. a REST API.

(The REST API itself being served from another container.)

---

## Keeping secrets

.warning[Ideally, you should not put secrets (passwords, tokens...) in:]

* command-line or environment variables (anyone with Docker API access can get them),

* images, especially stored in a registry.

Secrets management is better handled with an orchestrator (like Swarm or Kubernetes).

Orchestrators allow passing secrets in a "one-way" manner.

Managing secrets securely without an orchestrator can be contrived.

E.g.:

- read the secret on stdin when the service starts (see the sketch below),

- pass the secret using an API endpoint.
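A minimal sketch of the stdin approach (the image and service names are placeholders, and the image is assumed to ship a shell):

```bash
# -i keeps stdin open; the secret is piped in and never appears
# in the image, on the command line, or in `docker inspect` output.
docker run -i myapp sh -c 'read -r SECRET; export SECRET; exec myservice' < secret.txt
```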
@@ -1,9 +1,9 @@

class: title

-# Background Containers
+# Background containers

-(image)
+(image)

---

@@ -117,7 +117,7 @@ CONTAINER ID IMAGE ... CREATED STATUS ...

Many Docker commands will work on container IDs: `docker stop`, `docker rm`...

-If we want to list only the IDs of our containers (without the other colums
+If we want to list only the IDs of our containers (without the other columns
or the header line),
we can use the `-q` ("Quiet", "Quick") flag:
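That flag combines nicely with command substitution; e.g., to stop every running container in one go (use with care):

```bash
docker stop $(docker ps -q)
```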
@@ -1,4 +1,4 @@
-# Building Images Interactively
+# Building images interactively

In this section, we will create our first container image.

@@ -16,27 +16,21 @@ We will:

---

-## Building Images Interactively
+## The plan

-As we have seen, the images on the Docker Hub are sometimes very basic.
+1. Create a container (with `docker run`) using our base distro of choice.

-How do we want to construct our own images?
+2. Run a bunch of commands to install and set up our software in the container.

-As an example, we will build an image that has `figlet`.
+3. (Optionally) review changes in the container with `docker diff`.

-First, we will do it manually with `docker commit`.
+4. Turn the container into a new image with `docker commit`.

-Then, in an upcoming chapter, we will use a `Dockerfile` and `docker build`.
+5. (Optionally) add tags to the image with `docker tag`.

---

## Building from a base

Our base will be the `ubuntu` image.

---

-## Create a new container and make some changes
+## Setting up our container

Start an Ubuntu container:

@@ -107,7 +101,7 @@ As explained before:

---

-## Commit and run your image
+## Commit our changes into a new image

The `docker commit` command will create a new layer with those changes,
and a new image using this new layer.

@@ -119,7 +113,13 @@ $ docker commit <yourContainerId>

The output of the `docker commit` command will be the ID for your newly created image.

-We can run this image:
+We can use it as an argument to `docker run`.

---

## Testing our new image

Let's run this image:

```bash
$ docker run -it <newImageId>

@@ -131,6 +131,8 @@ root@fcfb62f0bfde:/# figlet hello
 |_| |_|\___|_|_|\___/
```

It works! .emoji[🎉]

---

## Tagging images
@@ -3,7 +3,7 @@ class: title
|
||||
|
||||
# Building Docker images with a Dockerfile
|
||||
|
||||

|
||||

|
||||
|
||||
---
|
||||
|
||||
@@ -93,20 +93,22 @@ The output of `docker build` looks like this:
|
||||
|
||||
.small[
|
||||
```bash
|
||||
$ docker build -t figlet .
|
||||
Sending build context to Docker daemon 2.048 kB
|
||||
Sending build context to Docker daemon
|
||||
Step 0 : FROM ubuntu
|
||||
---> e54ca5efa2e9
|
||||
Step 1 : RUN apt-get update
|
||||
---> Running in 840cb3533193
|
||||
---> 7257c37726a1
|
||||
Removing intermediate container 840cb3533193
|
||||
Step 2 : RUN apt-get install figlet
|
||||
---> Running in 2b44df762a2f
|
||||
---> f9e8f1642759
|
||||
Removing intermediate container 2b44df762a2f
|
||||
Successfully built f9e8f1642759
|
||||
docker build -t figlet .
|
||||
Sending build context to Docker daemon 2.048kB
|
||||
Step 1/3 : FROM ubuntu
|
||||
---> f975c5035748
|
||||
Step 2/3 : RUN apt-get update
|
||||
---> Running in e01b294dbffd
|
||||
(...output of the RUN command...)
|
||||
Removing intermediate container e01b294dbffd
|
||||
---> eb8d9b561b37
|
||||
Step 3/3 : RUN apt-get install figlet
|
||||
---> Running in c29230d70f9b
|
||||
(...output of the RUN command...)
|
||||
Removing intermediate container c29230d70f9b
|
||||
---> 0dfd7a253f21
|
||||
Successfully built 0dfd7a253f21
|
||||
Successfully tagged figlet:latest
|
||||
```
|
||||
]
|
||||
|
||||
@@ -134,20 +136,20 @@ Sending build context to Docker daemon 2.048 kB
|
||||
## Executing each step
|
||||
|
||||
```bash
|
||||
Step 1 : RUN apt-get update
|
||||
---> Running in 840cb3533193
|
||||
Step 2/3 : RUN apt-get update
|
||||
---> Running in e01b294dbffd
|
||||
(...output of the RUN command...)
|
||||
---> 7257c37726a1
|
||||
Removing intermediate container 840cb3533193
|
||||
Removing intermediate container e01b294dbffd
|
||||
---> eb8d9b561b37
|
||||
```
|
||||
|
||||
* A container (`840cb3533193`) is created from the base image.
|
||||
* A container (`e01b294dbffd`) is created from the base image.
|
||||
|
||||
* The `RUN` command is executed in this container.
|
||||
|
||||
* The container is committed into an image (`7257c37726a1`).
|
||||
* The container is committed into an image (`eb8d9b561b37`).
|
||||
|
||||
* The build container (`840cb3533193`) is removed.
|
||||
* The build container (`e01b294dbffd`) is removed.
|
||||
|
||||
* The output of this step will be the base image for the next one.
|
||||
|
||||
@@ -188,7 +190,7 @@ root@91f3c974c9a1:/# figlet hello
|
||||
```
|
||||
|
||||
|
||||
Yay! 🎉
|
||||
Yay! .emoji[🎉]
|
||||
|
||||
---
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
|
||||
class: title
|
||||
|
||||
# CMD and ENTRYPOINT
|
||||
# `CMD` and `ENTRYPOINT`
|
||||
|
||||

|
||||
|
||||
@@ -64,6 +64,7 @@ Let's build it:
|
||||
$ docker build -t figlet .
|
||||
...
|
||||
Successfully built 042dff3b4a8d
|
||||
Successfully tagged figlet:latest
|
||||
```
|
||||
|
||||
And run it:
|
||||
@@ -141,7 +142,7 @@ Why did we use JSON syntax for our `ENTRYPOINT`?
|
||||
|
||||
* When CMD or ENTRYPOINT use string syntax, they get wrapped in `sh -c`.
|
||||
|
||||
* To avoid this wrapping, you must use JSON syntax.
|
||||
* To avoid this wrapping, we can use JSON syntax.
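
To make the difference concrete, here is a short illustration (not part of the original slides); the `figlet -f script` arguments follow the example used in this chapter:

```dockerfile
# String syntax: Docker actually runs ["/bin/sh", "-c", "figlet -f script"],
# so the process started in the container is sh, not figlet.
ENTRYPOINT figlet -f script

# JSON (exec) syntax: figlet is executed directly, with no shell wrapper,
# and arguments passed to `docker run` are appended to this list.
ENTRYPOINT ["figlet", "-f", "script"]
```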

What if we used `ENTRYPOINT` with string syntax?

@@ -165,6 +166,7 @@ Let's build it:
$ docker build -t figlet .
...
Successfully built 36f588918d73
Successfully tagged figlet:latest
```

And run it:

@@ -178,8 +180,6 @@ $ docker run figlet salut
\/ \_/|_/|__/ \_/|_/|_/
```

Great success!

---

## Using `CMD` and `ENTRYPOINT` together

@@ -225,11 +225,11 @@ Let's build it:
$ docker build -t figlet .
...
Successfully built 6e0b6a048a07
Successfully tagged figlet:latest
```

And run it:
Run it without parameters:

.small[
```bash
$ docker run figlet
_ _ _ _
@@ -237,7 +237,15 @@ $ docker run figlet
| | _ | | | | __ __ ,_ | | __|
|/ \ |/ |/ |/ / \_ | | |_/ \_/ | |/ / |
| |_/|__/|__/|__/\__/ \/ \/ \__/ |_/|__/\_/|_/
```

---

## Overriding the image default parameters

Now let's pass extra arguments to the image.

```bash
$ docker run figlet hola mundo
_ _
| | | | |
@@ -245,7 +253,8 @@ $ docker run figlet hola mundo
|/ \ / \_|/ / | / |/ |/ | | | / |/ | / | / \_
| |_/\__/ |__/\_/|_/ | | |_/ \_/|_/ | |_/\_/|_/\__/
```
]

We overrode `CMD` but still used `ENTRYPOINT`.

---

@@ -1,5 +1,4 @@

# Compose For Development Stacks
# Compose for development stacks

Dockerfiles are great to build container images.

@@ -50,7 +49,7 @@ Before diving in, let's see a small example of Compose in action.

---

## Compose in action
class: pic



@@ -61,6 +60,10 @@ Before diving in, let's see a small example of Compose in action.

If you are using the official training virtual machines, Compose has been
pre-installed.

If you are using Docker for Mac/Windows or the Docker Toolbox, Compose comes with them.

If you are on Linux (desktop or server environment), you will need to install Compose from its [release page](https://github.com/docker/compose/releases) or with `pip install docker-compose`.

You can always check that it is installed by running:

```bash
@@ -113,6 +116,7 @@ them.

Here is the file used in the demo:

.small[
```yaml
version: "2"

@@ -131,25 +135,37 @@ services:

  redis:
    image: redis
```
]

---

## Compose file structure

A Compose file has multiple sections:

* `version` is mandatory. (We should use `"2"` or later; version 1 is deprecated.)

* `services` is mandatory. A service is one or more replicas of the same image running as containers.

* `networks` is optional and indicates to which networks containers should be connected.
<br/>(By default, containers will be connected on a private, per-compose-file network.)

* `volumes` is optional and can define volumes to be used and/or shared by the containers.

---

## Compose file versions

Version 1 directly has the various containers (`www`, `redis`...) at the top level of the file.
* Version 1 is legacy and shouldn't be used.

Version 2 has multiple sections:
(If you see a Compose file without `version` and `services`, it's a legacy v1 file.)

* `version` is mandatory and should be `"2"`.
* Version 2 added support for networks and volumes.

* `services` is mandatory and corresponds to the content of the version 1 format.
* Version 3 added support for deployment options (scaling, rolling updates, etc).

* `networks` is optional and indicates to which networks containers should be connected.
<br/>(By default, containers will be connected on a private, per-app network.)

* `volumes` is optional and can define volumes to be used and/or shared by the containers.

Version 3 adds support for deployment options (scaling, rolling updates, etc.)
The [Docker documentation](https://docs.docker.com/compose/compose-file/)
has excellent information about the Compose file format if you need to know more about versions.

---

@@ -259,6 +275,8 @@ Removing trainingwheels_www_1 ... done

Removing trainingwheels_redis_1 ... done
```

Use `docker-compose down -v` to remove everything including volumes.

---

## Special handling of volumes

@@ -1,9 +1,9 @@

class: title

# Connecting Containers With Links
# Connecting containers with links




---

slides/containers/Container_Engines.md (new file, 177 lines)

@@ -0,0 +1,177 @@

# Docker Engine and other container engines

* We are going to cover the architecture of the Docker Engine.

* We will also present other container engines.

---

class: pic

## Docker Engine external architecture



---

## Docker Engine external architecture

* The Engine is a daemon (service running in the background).

* All interaction is done through a REST API exposed over a socket.

* On Linux, the default socket is a UNIX socket: `/var/run/docker.sock`.

* We can also use a TCP socket, with optional mutual TLS authentication.

* The `docker` CLI communicates with the Engine over the socket. (See the example below.)
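
For instance, we can talk to the API directly; assuming a recent `curl` (7.40+) and permission to access the socket, something like this should work:

```bash
# Query the Engine's version endpoint straight over the UNIX socket.
# (Requires read/write access to /var/run/docker.sock.)
curl --unix-socket /var/run/docker.sock http://localhost/version
```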

Note: strictly speaking, the Docker API is not fully REST.

Some operations (e.g. dealing with interactive containers
and log streaming) don't fit the REST model.

---

class: pic

## Docker Engine internal architecture



---

## Docker Engine internal architecture

* Up to Docker 1.10: the Docker Engine is one single monolithic binary.

* Starting with Docker 1.11, the Engine is split into multiple parts:

  - `dockerd` (REST API, auth, networking, storage)

  - `containerd` (container lifecycle, controlled over a gRPC API)

  - `containerd-shim` (per-container; does almost nothing, but allows restarting the Engine without restarting the containers)

  - `runc` (per-container; does the actual heavy lifting to start the container)

* Some features (like image and snapshot management) are progressively being pushed from `dockerd` to `containerd`.

For more details, check [this short presentation by Phil Estes](https://www.slideshare.net/PhilEstes/diving-through-the-layers-investigating-runc-containerd-and-the-docker-engine-architecture).

---

## Other container engines

The following list is not exhaustive.

Furthermore, we limited the scope to Linux containers.

Containers also exist (sometimes with other names) on Windows, macOS, Solaris, FreeBSD ...

---

## LXC

* The venerable ancestor (first released in 2008).

* Docker initially relied on it to execute containers.

* No daemon; no central API.

* Each container is managed by a `lxc-start` process.

* Each `lxc-start` process exposes a custom API over a local UNIX socket, allowing interaction with the container.

* No notion of image (container filesystems have to be managed manually).

* Networking has to be set up manually.

---

## LXD

* Re-uses LXC code (through liblxc).

* Builds on top of LXC to offer a more modern experience.

* Daemon exposing a REST API.

* Can manage images, snapshots, migrations, networking, storage.

* "offers a user experience similar to virtual machines but using Linux containers instead."

---

## rkt

* Compares to `runc`.

* No daemon or API.

* Strong emphasis on security (through privilege separation).

* Networking has to be set up separately (e.g. through CNI plugins).

* Partial image management (pull, but no push).

(Image build is handled by separate tools.)

---

## CRI-O

* Designed to be used with Kubernetes as a simple, basic runtime.

* Compares to `containerd`.

* Daemon exposing a gRPC interface.

* Controlled using the CRI API (Container Runtime Interface defined by Kubernetes).

* Needs an underlying OCI runtime (e.g. runc).

* Handles storage, images, networking (through CNI plugins).

We're not aware of anyone using it directly (i.e. outside of Kubernetes).

---

## systemd

* "init" system (PID 1) in most modern Linux distributions.

* Offers tools like `systemd-nspawn` and `machinectl` to manage containers.

* `systemd-nspawn` is "in many ways ... similar to chroot(1), but more powerful".

* `machinectl` can interact with VMs and containers managed by systemd.

* Exposes a DBUS API.

* Basic image support (tar archives and raw disk images).

* Networking has to be set up manually.

---

## Overall ...

* The Docker Engine is very developer-centric:

  - easy to install

  - easy to use

  - no manual setup

  - first-class image build and transfer

* As a result, it is a fantastic tool in development environments.

* On servers:

  - Docker is a good default choice

  - If you use Kubernetes, the engine doesn't matter

@@ -3,7 +3,7 @@ class: title

# The Container Network Model




---

@@ -65,9 +65,17 @@ eb0eeab782f4 host host

* A network is managed by a *driver*.

* All the drivers that we have seen before are available.
* The built-in drivers include:

* A new multi-host driver, *overlay*, is available out of the box.
  * `bridge` (default)

  * `none`

  * `host`

  * `macvlan`

* A multi-host driver, *overlay*, is available out of the box (for Swarm clusters).

* More drivers can be provided by plugins (OVS, VLAN...)

@@ -75,6 +83,8 @@ eb0eeab782f4 host host

---

class: extra-details

## Differences with the CNI

* CNI = Container Network Interface

@@ -87,6 +97,22 @@ eb0eeab782f4 host host

---

class: pic

## Single container in a Docker network



---

class: pic

## Two containers on two Docker networks



---

## Creating a network

Let's create a network called `dev`.

@@ -126,13 +152,16 @@ $ docker run -d --name es --net dev elasticsearch:2

Now, create another container on this network.

.small[
```bash
$ docker run -ti --net dev alpine sh
root@0ecccdfa45ef:/#
```
]

From this new container, we can resolve and ping the other one, using its assigned name:

.small[
```bash
/ # ping es
PING es (172.18.0.2) 56(84) bytes of data.

@@ -145,6 +174,7 @@ PING es (172.18.0.2) 56(84) bytes of data.

rtt min/avg/max/mdev = 0.114/0.149/0.221/0.052 ms
root@0ecccdfa45ef:/#
```
]

---

@@ -155,6 +185,7 @@ class: extra-details

In Docker Engine 1.9, name resolution is implemented with `/etc/hosts`, and
updating it each time containers are added/removed.

.small[
```bash
[root@0ecccdfa45ef /]# cat /etc/hosts
172.18.0.3  0ecccdfa45ef

@@ -167,6 +198,7 @@ ff02::2 ip6-allrouters

172.18.0.2  es
172.18.0.2  es.dev
```
]

In Docker Engine 1.10, this has been replaced by a dynamic resolver.

@@ -174,7 +206,7 @@ In Docker Engine 1.10, this has been replaced by a dynamic resolver.

---

## Connecting multiple containers together
# Service discovery with containers

* Let's try to run an application that requires two containers.

@@ -210,9 +242,7 @@ $ docker ps -l

* If we connect to the application now, we will see an error page:

.small[

]

* This is because the Redis service is not running.
* This container tries to resolve the name `redis`.

@@ -241,9 +271,7 @@ $ docker run --net dev --name redis -d redis

* If we connect to the application now, we should see that the app is working correctly:

.small[

]

* When the app tries to resolve `redis`, instead of getting a DNS error, it gets the IP address of our Redis container.

@@ -282,7 +310,7 @@ since we wiped out the old Redis container).

---

class: x-extra-details
class: extra-details

## Names are *local* to each network

@@ -322,7 +350,7 @@ class: extra-details

Create the `prod` network.

```bash
$ docker create network prod
$ docker network create prod
5a41562fecf2d8f115bedc16865f7336232a04268bdf2bd816aecca01b68d50c
```

@@ -362,6 +390,7 @@ Each ElasticSearch instance has a name (generated when it is started). This name

Try the following command a few times:

.small[
```bash
$ docker run --rm --net dev centos curl -s es:9200
{

@@ -369,9 +398,11 @@ $ docker run --rm --net dev centos curl -s es:9200

  ...
}
```
]

Then try it a few times by replacing `--net dev` with `--net prod`:

.small[
```bash
$ docker run --rm --net prod centos curl -s es:9200
{

@@ -379,6 +410,7 @@ $ docker run --rm --net prod centos curl -s es:9200

  ...
}
```
]

---

@@ -428,7 +460,7 @@ When creating a network, extra options can be provided.

* `--internal` disables outbound traffic (the network won't have a default gateway).

* `--gateway` indicates which address to use for the gateway (when utbound traffic is allowed).
* `--gateway` indicates which address to use for the gateway (when outbound traffic is allowed).

* `--subnet` (in CIDR notation) indicates the subnet to use.

@@ -466,11 +498,13 @@ b2887adeb5578a01fd9c55c435cad56bbbe802350711d2743691f95743680b09

* If containers span multiple hosts, we need an *overlay* network to connect them together.

* Docker ships with a default network plugin, `overlay`, implementing an overlay network leveraging VXLAN.
* Docker ships with a default network plugin, `overlay`, implementing an overlay network leveraging
  VXLAN, *enabled with Swarm Mode*.

* Other plugins (Weave, Calico...) can provide overlay networks as well.

* Once you have an overlay network, *all the features that we've used in this chapter work identically.*
* Once you have an overlay network, *all the features that we've used in this chapter work identically
  across multiple hosts.*

---

@@ -486,7 +520,7 @@ Very short instructions:

- `docker network create mynet --driver overlay`
- `docker service create --network mynet myimage`

See http://jpetazzo.github.io/orchestration-workshop for all the deets about clustering!
See http://jpetazzo.github.io/container.training for all the deets about clustering!

---

@@ -508,13 +542,174 @@ General idea:

---

## Section summary
## Connecting and disconnecting dynamically

We've learned how to:
* So far, we have specified which network to use when starting the container.

* Create private networks for groups of containers.
* The Docker Engine also allows connecting and disconnecting while the container runs.

* Assign IP addresses to containers.
* This feature is exposed through the Docker API, and through two Docker CLI commands:

* Use container naming to implement service discovery.
  * `docker network connect <network> <container>`

  * `docker network disconnect <network> <container>`

---

## Dynamically connecting to a network

* We have a container named `es` connected to a network named `dev`.

* Let's start a simple alpine container on the default network:

  ```bash
  $ docker run -ti alpine sh
  / #
  ```

* In this container, try to ping the `es` container:

  ```bash
  / # ping es
  ping: bad address 'es'
  ```

  This doesn't work, but we will change that by connecting the container.

---

## Finding the container ID and connecting it

* Figure out the ID of our alpine container; here are two methods:

  * looking at `/etc/hostname` in the container,

  * running `docker ps -lq` on the host.

* Run the following command on the host:

  ```bash
  $ docker network connect dev `<container_id>`
  ```

---

## Checking what we did

* Try again to `ping es` from the container.

* It should now work correctly:

  ```bash
  / # ping es
  PING es (172.20.0.3): 56 data bytes
  64 bytes from 172.20.0.3: seq=0 ttl=64 time=0.376 ms
  64 bytes from 172.20.0.3: seq=1 ttl=64 time=0.130 ms
  ^C
  ```

* Interrupt it with Ctrl-C.

---

## Looking at the network setup in the container

We can look at the list of network interfaces with `ifconfig`, `ip a`, or `ip l`:

.small[
```bash
/ # ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
18: eth0@if19: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
    link/ether 02:42:ac:11:00:02 brd ff:ff:ff:ff:ff:ff
    inet 172.17.0.2/16 brd 172.17.255.255 scope global eth0
       valid_lft forever preferred_lft forever
20: eth1@if21: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
    link/ether 02:42:ac:14:00:04 brd ff:ff:ff:ff:ff:ff
    inet 172.20.0.4/16 brd 172.20.255.255 scope global eth1
       valid_lft forever preferred_lft forever
/ #
```
]

Each network connection is materialized with a virtual network interface.

As we can see, we can be connected to multiple networks at the same time.

---

## Disconnecting from a network

* Let's try the symmetrical command to disconnect the container:

  ```bash
  $ docker network disconnect dev <container_id>
  ```

* From now on, if we try to ping `es`, it will not resolve:

  ```bash
  / # ping es
  ping: bad address 'es'
  ```

* Trying to ping the IP address directly won't work either:

  ```bash
  / # ping 172.20.0.3
  ... (nothing happens until we interrupt it with Ctrl-C)
  ```

---

class: extra-details

## Network aliases are scoped per network

* Each network has its own set of network aliases.

* We saw this earlier: `es` resolves to different addresses in `dev` and `prod`.

* If we are connected to multiple networks, the resolver looks up names in each of them
  (as of Docker Engine 18.03, it is the connection order) and stops as soon as the name
  is found.

* Therefore, if we are connected to both `dev` and `prod`, resolving `es` will **not**
  give us the addresses of all the `es` services; but only the ones in `dev` or `prod`.

* However, we can look up `es.dev` or `es.prod` if we need to.

---

class: extra-details

## Finding out about our networks and names

* We can do reverse DNS lookups on containers' IP addresses.

* If the IP address belongs to a network (other than the default bridge), the result will be:

  ```
  name-or-first-alias-or-container-id.network-name
  ```

* Example:

.small[
```bash
$ docker run -ti --net prod --net-alias hello alpine
/ # apk add --no-cache drill
...
OK: 5 MiB in 13 packages
/ # ifconfig
eth0      Link encap:Ethernet  HWaddr 02:42:AC:15:00:03
          inet addr:`172.21.0.3`  Bcast:172.21.255.255  Mask:255.255.0.0
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
...
/ # drill -t ptr `3.0.21.172`.in-addr.arpa
...
;; ANSWER SECTION:
3.0.21.172.in-addr.arpa.  600  IN  PTR  `hello.prod`.
...
```
]

@@ -1,9 +1,9 @@

class: title

# Container Networking Basics
# Container networking basics




---

@@ -49,14 +49,14 @@ We will use `docker ps`:

```bash
$ docker ps
CONTAINER ID  IMAGE  ...  PORTS                                          ...
e40ffb406c9e  nginx  ...  0.0.0.0:32769->80/tcp, 0.0.0.0:32768->443/tcp  ...
CONTAINER ID  IMAGE  ...  PORTS                  ...
e40ffb406c9e  nginx  ...  0.0.0.0:32768->80/tcp  ...
```

* The web server is running on ports 80 and 443 inside the container.
* The web server is running on port 80 inside the container.

* Those ports are mapped to ports 32769 and 32768 on our Docker host.
* This port is mapped to port 32768 on our Docker host.

We will explain the whys and hows of this port mapping.

@@ -69,7 +69,7 @@ But first, let's make sure that everything works properly.

Point your browser to the IP address of your Docker host, on the port
shown by `docker ps` for container port 80.




---

@@ -81,7 +81,7 @@ Make sure to use the right port number if it is different
from the example below:

```bash
$ curl localhost:32769
$ curl localhost:32768
<!DOCTYPE html>
<html>
<head>

@@ -91,6 +91,31 @@ $ curl localhost:32769

---

## How does Docker know which port to map?

* There is metadata in the image telling "this image has something on port 80".

* We can see that metadata with `docker inspect`:

  ```bash
  $ docker inspect --format '{{.Config.ExposedPorts}}' nginx
  map[80/tcp:{}]
  ```

* This metadata was set in the Dockerfile, with the `EXPOSE` keyword.

* We can see that with `docker history`:

  ```bash
  $ docker history nginx
  IMAGE         CREATED      CREATED BY
  7f70b30f2cc6  11 days ago  /bin/sh -c #(nop)  CMD ["nginx" "-g" "…
  <missing>     11 days ago  /bin/sh -c #(nop)  STOPSIGNAL [SIGTERM]
  <missing>     11 days ago  /bin/sh -c #(nop)  EXPOSE 80/tcp
  ```

---

## Why are we mapping ports?

* We are out of IPv4 addresses.

@@ -113,7 +138,7 @@ There is a command to help us:

```bash
$ docker port <containerID> 80
32769
32768
```

---

@@ -128,7 +153,7 @@ $ docker run -d -p 8000:80 nginx

$ docker run -d -p 8080:80 -p 8888:80 nginx
```

* We are running two NGINX web servers.
* We are running three NGINX web servers.
* The first one is exposed on port 80.
* The second one is exposed on port 8000.
* The third one is exposed on ports 8080 and 8888.

@@ -189,87 +214,6 @@ $ ping <ipAddress>

---

## The different network drivers

A container can use one of the following drivers:

* `bridge` (default)
* `none`
* `host`
* `container`

The driver is selected with `docker run --net ...`.

The different drivers are explained with more details on the following slides.

---

## The default bridge

* By default, the container gets a virtual `eth0` interface.
<br/>(In addition to its own private `lo` loopback interface.)

* That interface is provided by a `veth` pair.

* It is connected to the Docker bridge.
<br/>(Named `docker0` by default; configurable with `--bridge`.)

* Addresses are allocated on a private, internal subnet.
<br/>(Docker uses 172.17.0.0/16 by default; configurable with `--bip`.)

* Outbound traffic goes through an iptables MASQUERADE rule.

* Inbound traffic goes through an iptables DNAT rule.

* The container can have its own routes, iptables rules, etc. (A short inspection sketch follows.)
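
To see these pieces on a live system, we can poke around like this (a sketch; interface numbers, addresses, and rule counts will differ from host to host):

```bash
# The bridge and the veth pairs, as seen from the host:
ip addr show docker0             # the bridge itself, usually 172.17.0.1/16
docker network inspect bridge    # subnet, gateway, attached containers

# The NAT rules described above (requires root):
sudo iptables -t nat -L POSTROUTING -n   # MASQUERADE rule for outbound traffic
sudo iptables -t nat -L DOCKER -n        # DNAT rules for published ports
```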

---

## The null driver

* Container is started with `docker run --net none ...`

* It only gets the `lo` loopback interface. No `eth0`.

* It can't send or receive network traffic.

* Useful for isolated/untrusted workloads.

---

## The host driver

* Container is started with `docker run --net host ...`

* It sees (and can access) the network interfaces of the host.

* It can bind any address, any port (for ill and for good).

* Network traffic doesn't have to go through NAT, bridge, or veth.

* Performance = native! (Quick demo below.)

Use cases:

* Performance sensitive applications (VOIP, gaming, streaming...)

* Peer discovery (e.g. Erlang port mapper, Raft, Serf...)
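
A quick way to see the difference (sketch):

```bash
# With the host driver, the container sees the host's interfaces directly:
docker run --rm --net host alpine ip addr

# Compare with the default bridge driver, which shows only lo and eth0:
docker run --rm alpine ip addr
```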

---

## The container driver

* Container is started with `docker run --net container:id ...`

* It re-uses the network stack of another container.

* It shares with this other container the same interfaces, IP address(es), routes, iptables rules, etc.

* Those containers can communicate over their `lo` interface.
<br/>(i.e. one can bind to 127.0.0.1 and the others can connect to it; see the example below.)
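
For example (a sketch; `wget` is the BusyBox version shipped in the `alpine` image):

```bash
# Start a web server on the default bridge network:
docker run -d --name web nginx

# Join its network stack: localhost now points at the nginx container.
docker run --rm --net container:web alpine wget -qO- http://localhost/
```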

---

## Section summary

We've learned how to:

slides/containers/Containers_From_Scratch.md (new file, 3 lines)

@@ -0,0 +1,3 @@

# Building containers from scratch

(This is a "bonus section" done if time permits.)

slides/containers/Copy_On_Write.md (new file, 339 lines)

@@ -0,0 +1,339 @@

# Copy-on-write filesystems

Container engines rely on copy-on-write to be able
to start containers quickly, regardless of their size.

We will explain how that works, and review some of
the copy-on-write storage systems available on Linux.

---

## What is copy-on-write?

- Copy-on-write is a mechanism for sharing data.

- The data appears to be a copy, but is only
  a link (or reference) to the original data.

- The actual copy happens only when someone
  tries to change the shared data.

- Whoever changes the shared data ends up
  using their own copy instead of the shared data.

---

## A few metaphors

--

- First metaphor:
  <br/>white board and tracing paper

--

- Second metaphor:
  <br/>magic books with shadowy pages

--

- Third metaphor:
  <br/>just-in-time house building

---

## Copy-on-write is *everywhere*

- Process creation with `fork()`.

- Consistent disk snapshots.

- Efficient VM provisioning.

- And, of course, containers.

---

## Copy-on-write and containers

Copy-on-write is essential to give us "convenient" containers.

- Creating a new container (from an existing image) is "free".

  (Otherwise, we would have to copy the image first.)

- Customizing a container (by tweaking a few files) is cheap.

  (Adding a 1 KB configuration file to a 1 GB container takes 1 KB, not 1 GB.)

- We can take snapshots, i.e. have "checkpoints" or "save points"
  when building images. (A quick `docker diff` sketch follows.)
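
We can observe this at work with `docker diff`, which lists only what changed in a container's read-write layer (sketch):

```bash
# Create a container and change a single file in it:
docker run --name tweak ubuntu sh -c 'echo hello > /etc/motd'

# Only the modified file (and its parent directory) shows up;
# the rest of the image stays shared and untouched.
docker diff tweak
```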

---

## AUFS overview

- The original (legacy) copy-on-write filesystem used by the first versions of Docker.

- Combines multiple *branches* in a specific order.

- Each branch is just a normal directory.

- You generally have:

  - at least one read-only branch (at the bottom),

  - exactly one read-write branch (at the top).

  (But other fun combinations are possible too!)

---

## AUFS operations: opening a file

- With `O_RDONLY` - read-only access:

  - look it up in each branch, starting from the top

  - open the first one we find

- With `O_WRONLY` or `O_RDWR` - write access:

  - if the file exists on the top branch: open it

  - if the file exists on another branch: "copy up"
    <br/>
    (i.e. copy the file to the top branch and open the copy)

  - if the file doesn't exist on any branch: create it on the top branch

That "copy-up" operation can take a while if the file is big!

---

## AUFS operations: deleting a file

- A *whiteout* file is created.

- This is similar to the concept of "tombstones" used in some data systems.

```
# docker run ubuntu rm /etc/shadow

# ls -la /var/lib/docker/aufs/diff/$(docker ps --no-trunc -lq)/etc
total 8
drwxr-xr-x 2 root root 4096 Jan 27 15:36 .
drwxr-xr-x 5 root root 4096 Jan 27 15:36 ..
-r--r--r-- 2 root root    0 Jan 27 15:36 .wh.shadow
```

---

## AUFS performance

- AUFS `mount()` is fast, so creation of containers is quick.

- Read/write access has native speeds.

- But initial `open()` is expensive in two scenarios:

  - when writing big files (log files, databases ...),

  - when searching many directories (PATH, classpath, etc.) over many layers.

- Protip: when we built dotCloud, we ended up putting
  all important data on *volumes*.

- When starting the same container multiple times:

  - the data is loaded only once from disk, and cached only once in memory;

  - but `dentries` will be duplicated.

---

## Device Mapper

Device Mapper is a rich subsystem with many features.

It can be used for: RAID, encrypted devices, snapshots, and more.

In the context of containers (and Docker in particular), "Device Mapper"
means:

"the Device Mapper system + its *thin provisioning target*"

If you see the abbreviation "thinp" it stands for "thin provisioning".

---

## Device Mapper principles

- Copy-on-write happens on the *block* level
  (instead of the *file* level).

- Each container and each image get their own block device.

- At any given time, it is possible to take a snapshot:

  - of an existing container (to create a frozen image),

  - of an existing image (to create a container from it).

- If a block has never been written to:

  - it's assumed to be all zeros,

  - it's not allocated on disk.

(That last property is the reason for the name "thin" provisioning.)

---

## Device Mapper operational details

- Two storage areas are needed:
  one for *data*, another for *metadata*.

- "data" is also called the "pool"; it's just a big pool of blocks.

  (Docker uses the smallest possible block size, 64 KB.)

- "metadata" contains the mappings between virtual offsets (in the
  snapshots) and physical offsets (in the pool).

- Each time a new block (or a copy-on-write block) is written,
  a block is allocated from the pool.

- When there are no more blocks in the pool, attempts to write
  will stall until the pool is increased (or the write operation
  aborted).

- In other words: when running out of space, containers are
  frozen, but operations will resume as soon as space is available.

---

## Device Mapper performance

- By default, Docker puts data and metadata on a loop device
  backed by a sparse file.

- This is great from a usability point of view,
  since zero configuration is needed.

- But it is terrible from a performance point of view:

  - each time a container writes to a new block,
  - a block has to be allocated from the pool,
  - and when it's written to,
  - a block has to be allocated from the sparse file,
  - and sparse file performance isn't great anyway.

- If you use Device Mapper, make sure to put data (and metadata)
  on devices!

---

## BTRFS principles

- BTRFS is a filesystem (like EXT4, XFS, NTFS...) with built-in snapshots.

- The "copy-on-write" happens at the filesystem level.

- BTRFS integrates the snapshot and block pool management features
  at the filesystem level.

  (Instead of the block level for Device Mapper.)

- In practice, we create a "subvolume" and
  later take a "snapshot" of that subvolume.

  Imagine: `mkdir` with Super Powers and `cp -a` with Super Powers.

- These operations can be executed with the `btrfs` CLI tool.

---

## BTRFS in practice with Docker

- Docker can use BTRFS and its snapshotting features to store container images.

- The only requirement is that `/var/lib/docker` is on a BTRFS filesystem.

  (Or, the directory specified with the `--data-root` flag when starting the engine.)

---

class: extra-details

## BTRFS quirks

- BTRFS works by dividing its storage in *chunks*.

- A chunk can contain data or metadata.

- You can run out of chunks (and get `No space left on device`)
  even though `df` shows space available.

  (Because chunks are only partially allocated.)

- Quick fix:

  ```
  # btrfs filesys balance start -dusage=1 /var/lib/docker
  ```

---

## Overlay2

- Overlay2 is very similar to AUFS.

- However, it has been merged into the "upstream" kernel.

- It is therefore available on all modern kernels.

  (AUFS was available on Debian and Ubuntu, but required custom kernels on other distros.)

- It is simpler than AUFS (it can only have two branches, called "layers").

  - The container engine abstracts this detail, so this is not a concern.

- Overlay2 storage drivers generally use hard links between layers.

  - This improves `stat()` and `open()` performance, at the expense of inode usage. (A hands-on sketch follows.)
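
Under the hood this is the kernel's `overlay` filesystem type, which we can mount by hand to watch the layering in action (a sketch; the directory names are arbitrary, and it requires root plus overlay support in the kernel):

```bash
# A lower (read-only) layer, an upper (read-write) layer,
# the work directory required by overlayfs, and a mount point:
mkdir lower upper work merged
echo "from the image" > lower/file.txt

sudo mount -t overlay overlay \
  -o lowerdir=$PWD/lower,upperdir=$PWD/upper,workdir=$PWD/work $PWD/merged

# Writes go to the upper layer; the lower layer is never modified:
echo "changed" | sudo tee merged/file.txt
ls upper/    # file.txt (the copied-up version)
```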

---

## ZFS

- ZFS is similar to BTRFS (at least from a container user's perspective).

- Pros:

  - high performance
  - high reliability (with e.g. data checksums)
  - optional data compression and deduplication

- Cons:

  - high memory usage
  - not in upstream kernel

- It is available as a kernel module or through FUSE.

---

## Which one is the best?

- Eventually, overlay2 should be the best option.

- It is available on all modern systems.

- Its memory usage is better than Device Mapper, BTRFS, or ZFS.

- The remarks about *write performance* shouldn't bother you:
  <br/>
  data should always be stored in volumes anyway!

@@ -3,7 +3,7 @@ class: title

# Copying files during the build




---

@@ -64,7 +64,7 @@ Create this Dockerfile.

## Testing our C program

* Create `hello.c` and `Dockerfile` in the same direcotry.
* Create `hello.c` and `Dockerfile` in the same directory.

* Run `docker build -t hello .` in this directory.

@@ -93,7 +93,7 @@ Success!

* Older Dockerfiles also have the `ADD` instruction.
<br/>It is similar but can automatically extract archives.

* If we really wanted to compile C code in a compiler, we would:
* If we really wanted to compile C code in a container, we would:

  * Place it in a different directory, with the `WORKDIR` instruction.

@@ -10,17 +10,19 @@

* [Solaris Containers (2004)](https://en.wikipedia.org/wiki/Solaris_Containers)

* [FreeBSD jails (1999)](https://www.freebsd.org/cgi/man.cgi?query=jail&sektion=8&manpath=FreeBSD+4.0-RELEASE)
* [FreeBSD jails (1999-2000)](https://www.freebsd.org/cgi/man.cgi?query=jail&sektion=8&manpath=FreeBSD+4.0-RELEASE)

Containers have been around for a *very long time* indeed.

(See [this excellent blog post by Serge Hallyn](https://s3hh.wordpress.com/2018/03/22/history-of-containers/) for more historic details.)

---

class: pic

## The VPS age (until 2007-2008)




---

slides/containers/Docker_Machine.md (new file, 81 lines)

@@ -0,0 +1,81 @@

# Managing hosts with Docker Machine

- Docker Machine is a tool to provision and manage Docker hosts.

- It automates the creation of a virtual machine:

  - locally, with a tool like VirtualBox or VMware;

  - on a public cloud like AWS EC2, Azure, Digital Ocean, GCP, etc.;

  - on a private cloud like OpenStack.

- It can also configure existing machines through an SSH connection.

- It can manage as many hosts as you want, with as many "drivers" as you want.

---

## Docker Machine workflow

1) Prepare the environment: set up VirtualBox, obtain cloud credentials ...

2) Create hosts with `docker-machine create -d drivername machinename`.

3) Use a specific machine with `eval $(docker-machine env machinename)`.

4) Profit! (See the sketch below.)
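
Put together, a typical session might look like this (a sketch; `mybox` and the VirtualBox driver are arbitrary choices):

```bash
# Create a new Docker host as a local VirtualBox VM:
docker-machine create -d virtualbox mybox

# Point the docker CLI at that machine, for the current shell only:
eval $(docker-machine env mybox)

# From now on, docker commands run against the new host:
docker run --rm alpine echo "hello from mybox"
```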

---

## Environment variables

- Most of the tools (CLI, libraries...) connecting to the Docker API can use environment variables.

- These variables are:

  - `DOCKER_HOST` (indicates address+port to connect to, or path of UNIX socket)

  - `DOCKER_TLS_VERIFY` (indicates that TLS mutual auth should be used)

  - `DOCKER_CERT_PATH` (path to the keypair and certificate to use for auth)

- `docker-machine env ...` will generate the variables needed to connect to a host.

- `eval $(docker-machine env ...)` sets these variables in the current shell. (Sample output below.)
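
For reference, the output of `docker-machine env` is just a series of `export` statements, along these lines (the values are illustrative):

```bash
export DOCKER_TLS_VERIFY="1"
export DOCKER_HOST="tcp://192.168.99.100:2376"
export DOCKER_CERT_PATH="/home/user/.docker/machine/machines/mybox"
export DOCKER_MACHINE_NAME="mybox"
# Run this command to configure your shell:
# eval $(docker-machine env mybox)
```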

---

## Host management features

With `docker-machine`, we can:

- upgrade a host to the latest version of the Docker Engine,

- start/stop/restart hosts,

- get a shell on a remote machine (with SSH),

- copy files to/from remote machines (with SCP),

- mount a remote host's directory on the local machine (with SSHFS),

- ...

---

## The `generic` driver

When provisioning a new host, `docker-machine` executes these steps:

1) Create the host using a cloud or hypervisor API.

2) Connect to the host over SSH.

3) Install and configure Docker on the host.

With the `generic` driver, we provide the IP address of an existing host
(instead of e.g. cloud credentials) and we omit the first step.

This makes it possible to provision physical machines, or VMs provided by a 3rd
party, or to use a cloud for which we don't have a provisioning API.

@@ -58,7 +58,7 @@ class: pic

## The deployment problem




---

@@ -66,15 +66,15 @@ class: pic

## The matrix from hell




---

class: pic

## The parallel with the shipping indsutry
## The parallel with the shipping industry




---

@@ -82,7 +82,7 @@ class: pic

## Intermodal shipping containers




---

@@ -90,7 +90,7 @@ class: pic

## A new shipping ecosystem




---

@@ -98,7 +98,7 @@ class: pic

## A shipping container system for applications




---

@@ -106,17 +106,29 @@ class: pic

## Eliminate the matrix from hell




---

## Results

* Dev-to-prod reduced from 9 months to 15 minutes (ING)
* [Dev-to-prod reduced from 9 months to 15 minutes (ING)](
  https://www.docker.com/sites/default/files/CS_ING_01.25.2015_1.pdf)

* Continuous integration job time reduced by more than 60% (BBC)
* [Continuous integration job time reduced by more than 60% (BBC)](
  https://www.docker.com/sites/default/files/CS_BBCNews_01.25.2015_1.pdf)

* Dev-to-prod reduced from weeks to minutes (GILT)
* [Deploy 100 times a day instead of once a week (GILT)](
  https://www.docker.com/sites/default/files/CS_Gilt%20Groupe_03.18.2015_0.pdf)

* [70% infrastructure consolidation (MetLife)](
  https://www.docker.com/customers/metlife-transforms-customer-experience-legacy-and-microservices-mashup)

* [60% infrastructure consolidation (Intesa Sanpaolo)](
  https://blog.docker.com/2017/11/intesa-sanpaolo-builds-resilient-foundation-banking-docker-enterprise-edition/)

* [14x application density; 60% of legacy datacenter migrated in 4 months (GE Appliances)](
  https://www.docker.com/customers/ge-uses-docker-enable-self-service-their-developers)

* etc.

slides/containers/Dockerfile_Tips.md (new file, 361 lines)

@@ -0,0 +1,361 @@

# Tips for efficient Dockerfiles

We will see how to:

* Reduce the number of layers.

* Leverage the build cache so that builds can be faster.

* Embed unit testing in the build process.

---

## Reducing the number of layers

* Each line in a `Dockerfile` creates a new layer.

* Build your `Dockerfile` to take advantage of Docker's caching system.

* Combine commands by using `&&` to continue commands and `\` to wrap lines.

Note: it is frequent to build a Dockerfile line by line:

```dockerfile
RUN apt-get install thisthing
RUN apt-get install andthatthing andthatotherone
RUN apt-get install somemorestuff
```

And then refactor it trivially before shipping:

```dockerfile
RUN apt-get install thisthing andthatthing andthatotherone somemorestuff
```

---

## Avoid re-installing dependencies at each build

* Classic Dockerfile problem:

  "each time I change a line of code, all my dependencies are re-installed!"

* Solution: `COPY` dependency lists (`package.json`, `requirements.txt`, etc.)
  by themselves to avoid reinstalling unchanged dependencies every time.

---

## Example "bad" `Dockerfile`

The dependencies are reinstalled every time, because the build system does not know if `requirements.txt` has been updated.

```dockerfile
FROM python
WORKDIR /src
COPY . .
RUN pip install -qr requirements.txt
EXPOSE 5000
CMD ["python", "app.py"]
```

---

## Fixed `Dockerfile`

Adding the dependencies as a separate step means that Docker can cache more efficiently and only install them when `requirements.txt` changes.

```dockerfile
FROM python
COPY requirements.txt /tmp/requirements.txt
RUN pip install -qr /tmp/requirements.txt
WORKDIR /src
COPY . .
EXPOSE 5000
CMD ["python", "app.py"]
```

---

## Embedding unit tests in the build process

```dockerfile
FROM <baseimage>
RUN <install dependencies>
COPY <code>
RUN <build code>
RUN <install test dependencies>
COPY <test data sets and fixtures>
RUN <unit tests>
FROM <baseimage>
RUN <install dependencies>
COPY <code>
RUN <build code>
CMD, EXPOSE ...
```

* The build fails as soon as an instruction fails
* If `RUN <unit tests>` fails, the build doesn't produce an image
* If it succeeds, it produces a clean image (without test libraries and data)

---

# Dockerfile examples

There are a number of tips, tricks, and techniques that we can use in Dockerfiles.

But sometimes, we have to use different (and even opposed) practices depending on:

- the complexity of our project,

- the programming language or framework that we are using,

- the stage of our project (early MVP vs. super-stable production),

- whether we're building a final image or a base for further images,

- etc.

We are going to show a few examples using very different techniques.

---

## When to optimize an image

When authoring official images, it is a good idea to reduce as much as possible:

- the number of layers,

- the size of the final image.

This is often done at the expense of build time and convenience for the image maintainer;
but when an image is downloaded millions of times, saving even a few seconds of pull time
can be worth it.

.small[
```dockerfile
RUN apt-get update && apt-get install -y libpng12-dev libjpeg-dev && rm -rf /var/lib/apt/lists/* \
	&& docker-php-ext-configure gd --with-png-dir=/usr --with-jpeg-dir=/usr \
	&& docker-php-ext-install gd
...
RUN curl -o wordpress.tar.gz -SL https://wordpress.org/wordpress-${WORDPRESS_UPSTREAM_VERSION}.tar.gz \
	&& echo "$WORDPRESS_SHA1 *wordpress.tar.gz" | sha1sum -c - \
	&& tar -xzf wordpress.tar.gz -C /usr/src/ \
	&& rm wordpress.tar.gz \
	&& chown -R www-data:www-data /usr/src/wordpress
```
]

(Source: [Wordpress official image](https://github.com/docker-library/wordpress/blob/618490d4bdff6c5774b84b717979bfe3d6ba8ad1/apache/Dockerfile))

---

## When to *not* optimize an image

Sometimes, it is better to prioritize *maintainer convenience*.

In particular, if:

- the image changes a lot,

- the image has very few users (e.g. only 1, the maintainer!),

- the image is built and run on the same machine,

- the image is built and run on machines with a very fast link ...

In these cases, just keep things simple!

(Next slide: a Dockerfile that can be used to preview a Jekyll / github pages site.)

---

```dockerfile
FROM debian:sid

RUN apt-get update -q
RUN apt-get install -yq build-essential make
RUN apt-get install -yq zlib1g-dev
RUN apt-get install -yq ruby ruby-dev
RUN apt-get install -yq python-pygments
RUN apt-get install -yq nodejs
RUN apt-get install -yq cmake
RUN gem install --no-rdoc --no-ri github-pages

COPY . /blog
WORKDIR /blog

VOLUME /blog/_site

EXPOSE 4000
CMD ["jekyll", "serve", "--host", "0.0.0.0", "--incremental"]
```
|
||||
|
||||
---
|
||||
|
||||
## Multi-dimensional versioning systems
|
||||
|
||||
Images can have a tag, indicating the version of the image.
|
||||
|
||||
But sometimes, there are multiple important components, and we need to indicate the versions
|
||||
for all of them.
|
||||
|
||||
This can be done with environment variables:
|
||||
|
||||
```dockerfile
|
||||
ENV PIP=9.0.3 \
|
||||
ZC_BUILDOUT=2.11.2 \
|
||||
SETUPTOOLS=38.7.0 \
|
||||
PLONE_MAJOR=5.1 \
|
||||
PLONE_VERSION=5.1.0 \
|
||||
PLONE_MD5=76dc6cfc1c749d763c32fff3a9870d8d
|
||||
```
|
||||
|
||||
(Source: [Plone official image](https://github.com/plone/plone.docker/blob/master/5.1/5.1.0/alpine/Dockerfile))
|
||||
|
||||
---
|
||||
|
||||
## Entrypoints and wrappers
|
||||
|
||||
It is very common to define a custom entrypoint.
|
||||
|
||||
That entrypoint will generally be a script, performing any combination of:
|
||||
|
||||
- pre-flights checks (if a required dependency is not available, display
|
||||
a nice error message early instead of an obscure one in a deep log file),
|
||||
|
||||
- generation or validation of configuration files,
|
||||
|
||||
- dropping privileges (with e.g. `su` or `gosu`, sometimes combined with `chown`),
|
||||
|
||||
- and more.
|
||||
|
||||
---
|
||||
|
||||
## A typical entrypoint script
|
||||
|
||||
```dockerfile
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
# first arg is '-f' or '--some-option'
|
||||
# or first arg is 'something.conf'
|
||||
if [ "${1#-}" != "$1" ] || [ "${1%.conf}" != "$1" ]; then
|
||||
set -- redis-server "$@"
|
||||
fi
|
||||
|
||||
# allow the container to be started with '--user'
|
||||
if [ "$1" = 'redis-server' -a "$(id -u)" = '0' ]; then
|
||||
chown -R redis .
|
||||
exec su-exec redis "$0" "$@"
|
||||
fi
|
||||
|
||||
exec "$@"
|
||||
```
|
||||
|
||||
(Source: [Redis official image](https://github.com/docker-library/redis/blob/d24f2be82673ccef6957210cc985e392ebdc65e4/4.0/alpine/docker-entrypoint.sh))
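
For such a script to run, the image has to copy it in and declare it as the entrypoint. Here is a minimal sketch of the wiring (paths and the default `CMD` are illustrative, not the exact Redis Dockerfile):

```dockerfile
# Hypothetical excerpt: install the wrapper script and make it the entrypoint.
COPY docker-entrypoint.sh /usr/local/bin/
ENTRYPOINT ["docker-entrypoint.sh"]
# Default arguments; these reach the entrypoint script as "$@".
CMD ["redis-server"]
```

With this setup, `docker run myimage --appendonly yes` ends up executing `redis-server --appendonly yes`: the script sees a first argument starting with `-` and prepends `redis-server` itself.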

---

## Factoring information

To facilitate maintenance (and avoid human errors), avoid repeating information like:

- version numbers,

- remote asset URLs (e.g. source tarballs) ...

Instead, use environment variables.

.small[
```dockerfile
ENV NODE_VERSION 10.2.1
...
RUN ...
    && curl -fsSLO --compressed "https://nodejs.org/dist/v$NODE_VERSION/node-v$NODE_VERSION.tar.xz" \
    && curl -fsSLO --compressed "https://nodejs.org/dist/v$NODE_VERSION/SHASUMS256.txt.asc" \
    && gpg --batch --decrypt --output SHASUMS256.txt SHASUMS256.txt.asc \
    && grep " node-v$NODE_VERSION.tar.xz\$" SHASUMS256.txt | sha256sum -c - \
    && tar -xf "node-v$NODE_VERSION.tar.xz" \
    && cd "node-v$NODE_VERSION" \
...
```
]

(Source: [Nodejs official image](https://github.com/nodejs/docker-node/blob/master/10/alpine/Dockerfile))

---

## Overrides

In theory, development and production images should be the same.

In practice, we often need to enable specific behaviors in development (e.g. debug statements).

One way to reconcile both needs is to use Compose to enable these behaviors.

Let's look at the [trainingwheels](https://github.com/jpetazzo/trainingwheels) demo app for an example.

---

## Production image

This Dockerfile builds an image leveraging gunicorn:

```dockerfile
FROM python
RUN pip install flask
RUN pip install gunicorn
RUN pip install redis
COPY . /src
WORKDIR /src
CMD gunicorn --bind 0.0.0.0:5000 --workers 10 counter:app
EXPOSE 5000
```

(Source: [trainingwheels Dockerfile](https://github.com/jpetazzo/trainingwheels/blob/master/www/Dockerfile))

---

## Development Compose file

This Compose file uses the same image, but with a few overrides for development:

- the Flask development server is used (overriding `CMD`),

- the `DEBUG` environment variable is set,

- a volume is used to provide a faster local development workflow.

.small[
```yaml
services:
  www:
    build: www
    ports:
      - 8000:5000
    user: nobody
    environment:
      DEBUG: 1
    command: python counter.py
    volumes:
      - ./www:/src
```
]

(Source: [trainingwheels Compose file](https://github.com/jpetazzo/trainingwheels/blob/master/docker-compose.yml))
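
To see the override in action, we can run the same code both ways. A minimal sketch (the image tag is an assumption; service name and ports follow the examples above):

```bash
# Production-style: build the image and run it with gunicorn (the Dockerfile CMD).
docker build -t trainingwheels ./www
docker run -d -p 5000:5000 trainingwheels

# Development-style: Compose applies the overrides (Flask dev server, DEBUG=1, bind mount).
docker-compose up -d
curl localhost:8000
```

Same image, two behaviors: the Compose file only overrides runtime parameters, so there is no drift between development and production Dockerfiles.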

---

## How to know which best practices are better?

- The main goal of containers is to make our lives easier.

- In this chapter, we showed many ways to write Dockerfiles.

- These Dockerfiles sometimes use diametrically opposed techniques.

- Yet, they were the "right" ones *for a specific situation.*

- It's OK (and even encouraged) to start simple and evolve as needed.

- Feel free to review this chapter later (after writing a few Dockerfiles) for inspiration!
173
slides/containers/Ecosystem.md
Normal file
@@ -0,0 +1,173 @@

# The container ecosystem

In this chapter, we will talk about a few actors of the container ecosystem.

We have (arbitrarily) decided to focus on two groups:

- the Docker ecosystem,

- the Cloud Native Computing Foundation (CNCF) and its projects.

---

class: pic

## The Docker ecosystem

---

## Moby vs. Docker

- Docker Inc. (the company) started Docker (the open source project).

- At some point, it became necessary to differentiate between:

  - the open source project (code base, contributors...),

  - the product that we use to run containers (the engine),

  - the platform that we use to manage containerized applications,

  - the brand.

---

class: pic

---

## Exercise in brand management

Questions:

--

- What is the brand of the car on the previous slide?

--

- What kind of engine does it have?

--

- Would you say that it's a safe or unsafe car?

--

- Harder question: can you drive from the US West Coast to the East Coast with it?

--

The answers to these questions are part of the Tesla brand.

---

## What if ...

- The blueprints for Tesla cars were available for free.

- You could legally build your own Tesla.

- You were allowed to customize it entirely.

  (Put a combustion engine, drive it with a game pad ...)

- You could even sell the customized versions.

--

- ... And call your customized version "Tesla".

--

Would we give the same answers to the questions on the previous slide?

---

## From Docker to Moby

- Docker Inc. decided to split the brand.

- Moby is the open source project.

  (= Components and libraries that you can use, reuse, customize, sell ...)

- Docker is the product.

  (= Software that you can use, buy support contracts ...)

- Docker is made with Moby.

- When Docker Inc. improves the Docker products, it improves Moby.

  (And vice versa.)

---

## Other examples

- *Read the Docs* is an open source project to generate and host documentation.

- You can host it yourself (on your own servers).

- You can also get hosted on readthedocs.org.

- The maintainers of the open source project often receive
  support requests from users of the hosted product ...

- ... And the maintainers of the hosted product often
  receive support requests from users of self-hosted instances.

- Another example:

  *WordPress.com is a blogging platform that is owned and hosted online by
  Automattic. It is run on WordPress, an open source piece of software used by
  bloggers. (Wikipedia)*

---

## Docker CE vs Docker EE

- Docker CE = Community Edition.

  - Available on most Linux distros, Mac, Windows.

  - Optimized for developers and ease of use.

- Docker EE = Enterprise Edition.

  - Available only on a subset of Linux distros + Windows servers.

    (Only available when there is a strong partnership to offer enterprise-class support.)

  - Optimized for production use.

  - Comes with additional components: security scanning, RBAC ...

---

## The CNCF

- Non-profit, part of the Linux Foundation; founded in December 2015.

  *The Cloud Native Computing Foundation builds sustainable ecosystems and fosters
  a community around a constellation of high-quality projects that orchestrate
  containers as part of a microservices architecture.*

  *CNCF is an open source software foundation dedicated to making cloud-native computing universal and sustainable.*

- Home of Kubernetes (and many other projects now).

- Funded by corporate memberships.

---

class: pic

@@ -1,9 +1,9 @@

class: title

# Our First Containers
# Our first containers

---

@@ -51,10 +51,13 @@ root@04c0bb0a6c07:/#
```

* This is a brand new container.

* It runs a bare-bones, no-frills `ubuntu` system.

* `-it` is shorthand for `-i -t`.

  * `-i` tells Docker to connect us to the container's stdin.

  * `-t` tells Docker that we want a pseudo-terminal.

---

@@ -72,22 +75,6 @@ Alright, we need to install it.

---

## An observation

Let's check how many packages are installed here.

```bash
root@04c0bb0a6c07:/# dpkg -l | wc -l
189
```

* `dpkg -l` lists the packages installed in our container

* `wc -l` counts them

* If you have a Debian or Ubuntu machine, you can run the same command
  and compare the results.

---

## Install a package in our container

We want `figlet`, so let's install it:

@@ -104,6 +91,12 @@ Reading package lists... Done

One minute later, `figlet` is installed!

---

## Try to run our freshly installed program

The `figlet` program takes a message as a parameter.

```bash
root@04c0bb0a6c07:/# figlet hello
 _          _ _
@@ -113,11 +106,34 @@ root@04c0bb0a6c07:/# figlet hello
|_| |_|\___|_|_|\___/
```

Beautiful! .emoji[😍]

---

## Exiting our container
class: in-person

Just exit the shell, like you would usually do.
## Counting packages in the container

Let's check how many packages are installed there.

```bash
root@04c0bb0a6c07:/# dpkg -l | wc -l
190
```

* `dpkg -l` lists the packages installed in our container

* `wc -l` counts them

How many packages do we have on our host?

---

class: in-person

## Counting packages on the host

Exit the container by logging out of the shell, like you would usually do.

(E.g. with `^D` or `exit`)

@@ -125,10 +141,52 @@ Just exit the shell, like you would usually do.
root@04c0bb0a6c07:/# exit
```

Now, try to:

* run `dpkg -l | wc -l`. How many packages are installed?

* run `figlet`. Does that work?

---

class: self-paced

## Comparing the container and the host

Exit the container by logging out of the shell, with `^D` or `exit`.

Now try to run `figlet`. Does that work?

(It shouldn't; except if, by coincidence, you are running on a machine where figlet was installed before.)

---

## Host and containers are independent things

* We ran an `ubuntu` container on a Linux/Windows/macOS host.

* They have different, independent packages.

* Installing something on the host doesn't expose it to the container.

* And vice-versa.

* Even if both the host and the container have the same Linux distro!

* We can run *any container* on *any host*.

  (One exception: Windows containers cannot run on Linux machines; at least not yet.)

---

## Where's our container?

* Our container is now in a *stopped* state.

* It still exists on disk, but all compute resources have been freed up.

* We will see later how to get back to that container.

---

## Starting another container
227
slides/containers/Getting_Inside.md
Normal file
@@ -0,0 +1,227 @@

class: title

# Getting inside a container

---

## Objectives

On a traditional server or VM, we sometimes need to:

* log into the machine (with SSH or on the console),

* analyze the disks (by removing them or rebooting with a rescue system).

In this chapter, we will see how to do that with containers.

---

## Getting a shell

Every once in a while, we want to log into a machine.

In a perfect world, this shouldn't be necessary.

* You need to install or update packages (and their configuration)?

  Use configuration management. (e.g. Ansible, Chef, Puppet, Salt...)

* You need to view logs and metrics?

  Collect and access them through a centralized platform.

In the real world, though ... we often need shell access!

---

## Not getting a shell

Even without a perfect deployment system, we can do many operations without getting a shell.

* Installing packages can (and should) be done in the container image.

* Configuration can be done at the image level, or when the container starts.

* Dynamic configuration can be stored in a volume (shared with another container).

* Logs written to stdout are automatically collected by the Docker Engine.

* Other logs can be written to a shared volume.

* Process information and metrics are visible from the host.

_Let's save logging, volumes ... for later, but let's have a look at process information!_

---

## Viewing container processes from the host

If you run Docker on Linux, container processes are visible on the host.

```bash
$ ps faux | less
```

* Scroll around the output of this command.

* You should see the `jpetazzo/clock` container.

* A containerized process is just like any other process on the host.

* We can use tools like `lsof`, `strace`, `gdb` ... to analyze them.

---

class: extra-details

## What's the difference between a container process and a host process?

* Each process (containerized or not) belongs to *namespaces* and *cgroups*.

* The namespaces and cgroups determine what a process can "see" and "do".

* Analogy: each process (containerized or not) runs with a specific UID (user ID).

* UID=0 is root, and has elevated privileges. Other UIDs are normal users.

_We will give more details about namespaces and cgroups later._

---

## Getting a shell in a running container

* Sometimes, we need to get a shell anyway.

* We _could_ run some SSH server in the container ...

* But it is easier to use `docker exec`.

```bash
$ docker exec -ti ticktock sh
```

* This creates a new process (running `sh`) _inside_ the container.

* This can also be done "manually" with the tool `nsenter` (see the sketch below).
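
For the curious, here is what the `nsenter` route looks like. A rough sketch, assuming a running container named `ticktock` and root privileges on the host (flags per util-linux `nsenter`):

```bash
# Find the PID (on the host) of the container's main process.
PID=$(docker inspect --format '{{.State.Pid}}' ticktock)

# Enter the container's namespaces and start a shell there.
sudo nsenter --target $PID --mount --uts --ipc --net --pid sh
```

`docker exec` does essentially this for us (plus handling cgroups, security profiles, and terminal allocation), which is why it is the recommended method.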

---

## Caveats

* The tool that you want to run needs to exist in the container.

* Some tools (like `ip netns exec`) let you attach to _one_ namespace at a time.

  (This lets you e.g. set up network interfaces, even if you don't have `ifconfig` or `ip` in the container.)

* Most importantly: the container needs to be running.

* What if the container is stopped or crashed?

---

## Getting a shell in a stopped container

* A stopped container is only _storage_ (like a disk drive).

* We cannot SSH into a disk drive or USB stick!

* We need to connect the disk to a running machine.

* How does that translate into the container world?

---

## Analyzing a stopped container

As an exercise, we are going to try to find out what's wrong with `jpetazzo/crashtest`.

```bash
docker run jpetazzo/crashtest
```

The container starts, but then stops immediately, without any output.

What would MacGyver™ do?

First, let's check the status of that container.

```bash
docker ps -l
```

---

## Viewing filesystem changes

* We can use `docker diff` to see files that were added / changed / removed.

  ```bash
  docker diff <container_id>
  ```

* The container ID was shown by `docker ps -l`.

* We can also see it with `docker ps -lq`.

* The output of `docker diff` shows some interesting log files!

---

## Accessing files

* We can extract files with `docker cp`.

  ```bash
  docker cp <container_id>:/var/log/nginx/error.log .
  ```

* Then we can look at that log file.

  ```bash
  cat error.log
  ```

  (The directory `/run/nginx` doesn't exist.)

---

## Exploring a crashed container

* We can restart a container with `docker start` ...

* ... But it will probably crash again immediately!

* We cannot specify a different program to run with `docker start`.

* But we can create a new image from the crashed container:

  ```bash
  docker commit <container_id> debugimage
  ```

* Then we can run a new container from that image, with a custom entrypoint:

  ```bash
  docker run -ti --entrypoint sh debugimage
  ```

---

class: extra-details

## Obtaining a complete dump

* We can also dump the entire filesystem of a container.

* This is done with `docker export`.

* It generates a tar archive.

  ```bash
  docker export <container_id> | tar tv
  ```

This will give a detailed listing of the content of the container.
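
If a listing is not enough, the archive can also be unpacked somewhere on the host for closer inspection. A small sketch (the target directory is arbitrary):

```bash
# Dump the container's filesystem into a scratch directory on the host.
mkdir /tmp/crashdump
docker export <container_id> | tar xf - -C /tmp/crashdump

# Now we can browse it with ordinary tools.
ls /tmp/crashdump/var/log
```

Note that `docker export` captures the container's filesystem only: the contents of volumes attached to the container are not included in the archive.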

@@ -1,9 +1,9 @@

class: title

# Understanding Docker Images
# Understanding Docker images

---

@@ -46,6 +46,8 @@ In this section, we will explain:

## Example for a Java webapp

Each of the following items will correspond to one layer:

* CentOS base layer
* Packages and configuration files added by our local IT
* JRE

@@ -56,6 +58,22 @@ In this section, we will explain:

---

class: pic

## The read-write layer

---

class: pic

## Multiple containers sharing the same image

---

## Differences between containers and images

* An image is a read-only filesystem.

@@ -63,24 +81,14 @@ In this section, we will explain:
* A container is an encapsulated set of processes running in a
  read-write copy of that filesystem.

* To optimize container boot time, *copy-on-write* is used
  instead of regular copy.

* `docker run` starts a container from a given image.

Let's give a couple of metaphors to illustrate those concepts.

---

## Image as stencils

Images are like templates or stencils that you can create containers from.

---

## Object-oriented programming
## Comparison with object-oriented programming

* Images are conceptually similar to *classes*.

@@ -99,7 +107,7 @@ If an image is read-only, how do we change it?
* We create a new container from that image.

* Then we make changes to that container.

* When we are satisfied with those changes, we transform them into a new layer.

* A new image is created by stacking the new layer on top of the old image.

@@ -118,7 +126,7 @@ If an image is read-only, how do we change it?

## Creating the first images

There is a special empty image called `scratch`.

* It allows us to *build from scratch*.

@@ -138,7 +146,7 @@ Note: you will probably never have to do this yourself.
* Saves all the changes made to a container into a new layer.
* Creates a new image (effectively a copy of the container).

`docker build`
`docker build` **(used 99% of the time)**

* Performs a repeatable build sequence.
* This is the preferred method!

@@ -180,6 +188,8 @@ Those images include:

* Ready-to-use components and services, like redis, postgresql...

* Over 130 at this point!

---

## User namespace

@@ -299,9 +309,9 @@ There are two ways to download images.
```bash
$ docker pull debian:jessie
Pulling repository debian
b164861940b8: Download complete
b164861940b8: Pulling image (jessie) from debian
d1881793a057: Download complete
```

* As seen previously, images are made up of layers.

@@ -1,10 +1,8 @@

class: title

# Install Docker
# Installing Docker

---

@@ -31,7 +29,7 @@ We can arbitrarily distinguish:

* Installing Docker on an existing Linux machine (physical or VM)

* Installing Docker on MacOS or Windows
* Installing Docker on macOS or Windows

* Installing Docker on a fleet of cloud VMs

@@ -39,7 +37,9 @@ We can arbitrarily distinguish:

## Installing Docker on Linux

* The recommended method is to install the packages supplied by Docker Inc.
* The recommended method is to install the packages supplied by Docker Inc.:

  https://store.docker.com

* The general method is:

@@ -57,13 +57,35 @@

---

## Installing Docker on MacOS and Windows
class: extra-details

* On MacOS, the recommended method is to use Docker4Mac:
## Docker Inc. packages vs distribution packages

* Docker Inc. releases new versions monthly (edge) and quarterly (stable)

* Releases are immediately available on Docker Inc.'s package repositories

* Linux distros don't always update to the latest Docker version

  (Sometimes, updating would break their guidelines for major/minor upgrades)

* Sometimes, some distros have carried packages with custom patches

* Sometimes, these patches added critical security bugs ☹

* Installing through Docker Inc.'s repositories is a bit of extra work …

  … but it is generally worth it!

---

## Installing Docker on macOS and Windows

* On macOS, the recommended method is to use Docker for Mac:

  https://docs.docker.com/docker-for-mac/install/

* On Windows 10 Pro, Enterprise, and Eduction, you can use Docker4Windows:
* On Windows 10 Pro, Enterprise, and Education, you can use Docker for Windows:

  https://docs.docker.com/docker-for-windows/install/

@@ -71,9 +93,36 @@ We can arbitrarily distinguish:

  https://docs.docker.com/toolbox/toolbox_install_windows/

* On Windows Server 2016, you can also install the native engine:

  https://docs.docker.com/install/windows/docker-ee/

---

## Running Docker on MacOS and Windows
## Docker for Mac and Docker for Windows

* Special Docker Editions that integrate well with their respective host OS

* Provide user-friendly GUI to edit Docker configuration and settings

* Leverage the host OS virtualization subsystem (e.g. the [Hypervisor API](https://developer.apple.com/documentation/hypervisor) on macOS)

* Installed like normal user applications on the host

* Under the hood, they both run a tiny VM (transparent to our daily use)

* Access network resources like normal applications
  <br/>(and therefore, play better with enterprise VPNs and firewalls)

* Support filesystem sharing through volumes (we'll talk about this later)

* They only support running one Docker VM at a time ...
  <br/>
  ... but we can use `docker-machine`, the Docker Toolbox, VirtualBox, etc. to get a cluster.

---

## Running Docker on macOS and Windows

When you execute `docker version` from the terminal:

@@ -90,25 +139,6 @@ This will also allow to use remote Engines exactly as if they were local.

---

## Docker4Mac and Docker4Windows

* They let you run Docker without VirtualBox

* They are installed like normal applications (think QEMU, but faster)

* They access network resources like normal applications
  <br/>(and therefore, play well with enterprise VPNs and firewalls)

* They support filesystem sharing through volumes (we'll talk about this later)

* They only support running one Docker VM at a time ...

  ... so if you want to run a full cluster locally, install e.g. the Docker Toolbox

* They can co-exist with the Docker Toolbox

---

## Important PSA about security

* If you have access to the Docker control socket, you can take over the machine
82
slides/containers/Labels.md
Normal file
@@ -0,0 +1,82 @@

# Labels

* Labels allow us to attach arbitrary metadata to containers.

* Labels are key/value pairs.

* They are specified at container creation.

* You can query them with `docker inspect`.

* They can also be used as filters with some commands (e.g. `docker ps`).

---

## Using labels

Let's create a few containers with a label `owner`.

```bash
docker run -d -l owner=alice nginx
docker run -d -l owner=bob nginx
docker run -d -l owner nginx
```

We didn't specify a value for the `owner` label in the last example.

This is equivalent to setting the value to be an empty string.

---

## Querying labels

We can view the labels with `docker inspect`.

```bash
$ docker inspect $(docker ps -lq) | grep -A3 Labels
            "Labels": {
                "maintainer": "NGINX Docker Maintainers <docker-maint@nginx.com>",
                "owner": ""
            },
```

We can use the `--format` flag to list the value of a label.

```bash
$ docker inspect $(docker ps -q) --format 'OWNER={{.Config.Labels.owner}}'
```

---

## Using labels to select containers

We can list containers having a specific label.

```bash
$ docker ps --filter label=owner
```

Or we can list containers having a specific label with a specific value.

```bash
$ docker ps --filter label=owner=alice
```

---

## Use-cases for labels

* HTTP vhost of a web app or web service.

  (The label is used to generate the configuration for NGINX, HAProxy, etc.; see the sketch after this list.)

* Backup schedule for a stateful service.

  (The label is used by a cron job to determine if/when to back up container data.)

* Service ownership.

  (To determine internal cross-billing, or who to page in case of outage.)

* etc.
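
To make the first use-case concrete, here is a hedged sketch of how a cron job or sidecar could turn labels into configuration. The label name (`vhost`) is an illustrative assumption, but the `docker ps` and `docker inspect` invocations are standard:

```bash
# List running containers carrying a 'vhost' label, with name and label value.
for container in $(docker ps -q --filter label=vhost); do
  name=$(docker inspect --format '{{.Name}}' $container)
  vhost=$(docker inspect --format '{{index .Config.Labels "vhost"}}' $container)
  echo "container $name serves $vhost"
  # A real script would emit an NGINX/HAProxy config stanza here, then reload the proxy.
done
```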
441
slides/containers/Local_Development_Workflow.md
Normal file
@@ -0,0 +1,441 @@

class: title

# Local development workflow with Docker

---

## Objectives

At the end of this section, you will be able to:

* Share code between container and host.

* Use a simple local development workflow.

---

## Local development in a container

We want to solve the following issues:

- "Works on my machine"

- "Not the same version"

- "Missing dependency"

By using Docker containers, we will get a consistent development environment.

---

## Working on the "namer" application

* We have to work on some application whose code is at:

  https://github.com/jpetazzo/namer.

* What is it? We don't know yet!

* Let's download the code.

```bash
$ git clone https://github.com/jpetazzo/namer
```

---

## Looking at the code

```bash
$ cd namer
$ ls -1
company_name_generator.rb
config.ru
docker-compose.yml
Dockerfile
Gemfile
```

--

Aha, a `Gemfile`! This is Ruby. Probably. We know this. Maybe?

---

## Looking at the `Dockerfile`

```dockerfile
FROM ruby

COPY . /src
WORKDIR /src
RUN bundler install

CMD ["rackup", "--host", "0.0.0.0"]
EXPOSE 9292
```

* This application is using a base `ruby` image.
* The code is copied in `/src`.
* Dependencies are installed with `bundler`.
* The application is started with `rackup`.
* It is listening on port 9292.

---

## Building and running the "namer" application

* Let's build the application with the `Dockerfile`!

--

```bash
$ docker build -t namer .
```

--

* Then run it. *We need to expose its ports.*

--

```bash
$ docker run -dP namer
```

--

* Check on which port the container is listening.

--

```bash
$ docker ps -l
```

---

## Connecting to our application

* Point our browser to our Docker node, on the port allocated to the container.

--

* Hit "reload" a few times.

--

* This is an enterprise-class, carrier-grade, ISO-compliant company name generator!

  (With 50% more bullshit than the average competition!)

  (Wait, was that 50% more, or 50% less? *Anyway!*)

---

## Making changes to the code

Option 1:

* Edit the code locally
* Rebuild the image
* Re-run the container

Option 2:

* Enter the container (with `docker exec`)
* Install an editor
* Make changes from within the container

Option 3:

* Use a *volume* to mount local files into the container
* Make changes locally
* Changes are reflected in the container

---

## Our first volume

We will tell Docker to map the current directory to `/src` in the container.

```bash
$ docker run -d -v $(pwd):/src -P namer
```

* `-d`: the container should run in detached mode (in the background).

* `-v`: the following host directory should be mounted inside the container.

* `-P`: publish all the ports exposed by this image.

* `namer` is the name of the image we will run.

* We don't specify a command to run because it is already set in the Dockerfile.

Note: on Windows, replace `$(pwd)` with `%cd%` (or `${pwd}` if you use PowerShell).

---

## Mounting volumes inside containers

The `-v` flag mounts a directory from your host into your Docker container.

The flag structure is:

```bash
[host-path]:[container-path]:[rw|ro]
```

* If `[host-path]` or `[container-path]` doesn't exist, it is created.

* You can control the write status of the volume with the `ro` and
  `rw` options.

* If you don't specify `rw` or `ro`, it will be `rw` by default.

There will be a full chapter about volumes!
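
For instance, to be extra careful, we could mount the code read-only. A small sketch using the syntax above (paths follow the namer example):

```bash
# Mount the source read-only: the container can execute the code but not modify it.
docker run -d -P -v $(pwd):/src:ro namer
```

With `:ro`, writes to `/src` inside the container fail with a "read-only file system" error, which is a cheap way to guarantee that a container never alters the source tree.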

---

## Testing the development container

* Check the port used by our new container.

```bash
$ docker ps -l
CONTAINER ID  IMAGE  COMMAND  CREATED        STATUS  PORTS                    NAMES
045885b68bc5  namer  rackup   3 seconds ago  Up ...  0.0.0.0:32770->9292/tcp  ...
```

* Open the application in your web browser.

---

## Making a change to our application

Our customer really doesn't like the color of our text. Let's change it.

```bash
$ vi company_name_generator.rb
```

And change

```css
color: royalblue;
```

To:

```css
color: red;
```

---

## Viewing our changes

* Reload the application in our browser.

--

* The color should have changed.

---

## Understanding volumes

* Volumes are *not* copying or synchronizing files between the host and the container.

* Volumes are *bind mounts*: a kernel mechanism associating one path with another.

* Bind mounts are *kind of* similar to symbolic links, but at a very different level.

* Changes made on the host or in the container will be visible on the other side.

  (Since under the hood, it's the same file on both sides anyway.)

---

## Trash your servers and burn your code

*(This is the title of a
[2013 blog post](http://chadfowler.com/2013/06/23/immutable-deployments.html)
by Chad Fowler, where he explains the concept of immutable infrastructure.)*

--

* Let's mess up majorly with our container.

  (Remove files or whatever.)

* Now, how can we fix this?

--

* Our old container (with the blue version of the code) is still running.

* See on which port it is exposed:
  ```bash
  docker ps
  ```

* Point our browser to it to confirm that it still works fine.

---

## Immutable infrastructure in a nutshell

* Instead of *updating* a server, we deploy a new one.

* This might be challenging with classical servers, but it's trivial with containers.

* In fact, with Docker, the most logical workflow is to build a new image and run it.

* If something goes wrong with the new image, we can always restart the old one.

* We can even keep both versions running side by side.

If this pattern sounds interesting, you might want to read about *blue/green deployment*
and *canary deployments*.

---

## Improving the workflow

The workflow that we showed is nice, but it requires us to:

* keep track of all the `docker run` flags required to run the container,

* inspect the `Dockerfile` to know which path(s) to mount,

* write scripts to hide that complexity.

There has to be a better way!

---

## Docker Compose to the rescue

* Docker Compose allows us to "encode" `docker run` parameters in a YAML file.

* Here is the `docker-compose.yml` file that we can use for our "namer" app:

  ```yaml
  www:
    build: .
    volumes:
      - .:/src
    ports:
      - 80:9292
  ```

* Try it:
  ```bash
  $ docker-compose up -d
  ```

---

## Working with Docker Compose

* When you see a `docker-compose.yml` file, you can use `docker-compose up`.

* It can build images and run them with the required parameters.

* Compose can also deal with complex, multi-container apps.

  (More on this later!)

---

## Recap of the development workflow

1. Write a Dockerfile to build an image containing our development environment.
   <br/>
   (Rails, Django, ... and all the dependencies for our app)

2. Start a container from that image.
   <br/>
   Use the `-v` flag to mount our source code inside the container.

3. Edit the source code outside the containers, using regular tools.
   <br/>
   (vim, emacs, textmate...)

4. Test the application.
   <br/>
   (Some frameworks pick up changes automatically.
   <br/>Others require you to Ctrl-C + restart after each modification.)

5. Iterate and repeat steps 3 and 4 until satisfied.

6. When done, commit+push source code changes.

---

class: extra-details

## Debugging inside the container

Docker has a command called `docker exec`.

It allows users to run a new process in a container which is already running.

If you sometimes find yourself wishing you could SSH into a container, you can use `docker exec` instead.

You can get a shell prompt inside an existing container this way, or run an arbitrary process for automation.

---

class: extra-details

## `docker exec` example

```bash
$ # You can run ruby commands in the area the app is running and more!
$ docker exec -it <yourContainerId> bash
root@5ca27cf74c2e:/opt/namer# irb
irb(main):001:0> [0, 1, 2, 3, 4].map {|x| x ** 2}.compact
=> [0, 1, 4, 9, 16]
irb(main):002:0> exit
```

---

class: extra-details

## Stopping the container

Now that we're done, let's stop our container.

```bash
$ docker stop <yourContainerID>
```

And remove it.

```bash
$ docker rm <yourContainerID>
```

---

## Section summary

We've learned how to:

* Share code between container and host.

* Set our working directory.

* Use a simple local development workflow.
294
slides/containers/Logging.md
Normal file
@@ -0,0 +1,294 @@

# Logging

In this chapter, we will explain the different ways to send logs from containers.

We will then show one particular method in action, using ELK and Docker's logging drivers.

---

## There are many ways to send logs

- The simplest method is to write to the standard output and error.

- Applications can write their logs to local files.

  (The files are usually periodically rotated and compressed.)

- It is also very common (on UNIX systems) to use syslog.

  (The logs are collected by syslogd or an equivalent like journald.)

- In large applications with many components, it is common to use a logging service.

  (The code uses a library to send messages to the logging service.)

*All these methods are available with containers.*

---

## Writing on stdout/stderr

- The standard output and error of containers is managed by the container engine.

- This means that each line written by the container is received by the engine.

- The engine can then do "whatever" with these log lines.

- With Docker, the default configuration is to write the logs to local files.

- The files can then be queried with e.g. `docker logs` (and the equivalent API request).

- This can be customized, as we will see later.

---

## Writing to local files

- If we write to files, it is possible to access them, but cumbersome.

  (We have to use `docker exec` or `docker cp`.)

- Furthermore, if the container is stopped, we cannot use `docker exec`.

- If the container is deleted, the logs disappear.

- What should we do for programs that can only log to local files?

--

- There are multiple solutions.

---

## Using a volume or bind mount

- Instead of writing logs to a normal directory, we can place them on a volume.

- The volume can be accessed by other containers.

- We can run a program like `filebeat` in another container accessing the same volume (see the sketch below).

  (`filebeat` reads local log files continuously, like `tail -f`, and sends them
  to a centralized system like ElasticSearch.)

- We can also use a bind mount, e.g. `-v /var/log/containers/www:/var/log/tomcat`.

- The container will write log files to a directory mapped to a host directory.

- The log files will appear on the host and be consumable directly from the host.
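
A minimal sketch of the shared-volume pattern (the `myapp` and `log-shipper` image names and paths are illustrative assumptions; a real setup would configure the shipper to read from the mounted directory):

```bash
# Create a named volume to hold the log files.
docker volume create applogs

# The application writes its log files under /var/log/app.
docker run -d --name app -v applogs:/var/log/app myapp

# A shipper container reads the same files and forwards them.
docker run -d --name shipper -v applogs:/var/log/app:ro log-shipper
```

The `:ro` on the second mount is optional, but it keeps the shipper from ever modifying the log files.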

---

## Using logging services

- We can use logging frameworks (like log4j or the Python `logging` package).

- These frameworks require some code and/or configuration in our application code.

- These mechanisms can be used identically inside or outside of containers.

- Sometimes, we can leverage containerized networking to simplify their setup.

- For instance, our code can send log messages to a server named `log`.

- The name `log` will resolve to different addresses in development, production, etc.

---

## Using syslog

- What if our code (or the program we are running in containers) uses syslog?

- One possibility is to run a syslog daemon in the container.

- Then that daemon can be set up to write to local files or forward to the network.

- Under the hood, syslog clients connect to a local UNIX socket, `/dev/log`.

- We can expose a syslog socket to the container (by using a volume or bind-mount).

- Then just create a symlink from `/dev/log` to the syslog socket.

- Voilà!
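
As an illustration of that last approach, a hedged sketch: share the host's syslog socket with the container (assuming the host runs a syslog daemon listening on `/dev/log`; `myapp` is a hypothetical image):

```bash
# Bind-mount the host's syslog socket into the container.
# Anything the containerized program writes to syslog now reaches the host's syslogd.
docker run -v /dev/log:/dev/log myapp
```

If the socket needs to live at a different path inside the container, mount it elsewhere (e.g. `-v /dev/log:/syslog`) and create the `/dev/log` symlink at container startup, as described above.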

---

## Using logging drivers

- If we log to stdout and stderr, the container engine receives the log messages.

- The Docker Engine has a modular logging system with many plugins, including:

  - json-file (the default one)
  - syslog
  - journald
  - gelf
  - fluentd
  - splunk
  - etc.

- Each plugin can process and forward the logs to another process or system.

---

## A word of warning about `json-file`

- By default, log file size is unlimited.

- This means that a very verbose container *will* use up all your disk space.

  (Or a less verbose container, but running for a very long time.)

- Log rotation can be enabled by setting a `max-size` option.

- Older log files can be removed by setting a `max-file` option.

- Just like other logging options, these can be set per container, or globally.

Example:
```bash
$ docker run --log-opt max-size=10m --log-opt max-file=3 elasticsearch
```
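
To apply the same limits globally, the options can go in the Docker Engine's configuration. A sketch of the corresponding `/etc/docker/daemon.json` (the Engine must be restarted for it to take effect, and existing containers keep their old settings):

```bash
# Write default logging options for all *new* containers.
cat >/etc/docker/daemon.json <<'EOF'
{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  }
}
EOF
```

Note: in `daemon.json`, `max-file` is a string (`"3"`), not a number — a common gotcha.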

---

## Demo: sending logs to ELK

- We are going to deploy an ELK stack.

- It will accept logs over a GELF socket.

- We will run a few containers with the `gelf` logging driver.

- We will then see our logs in Kibana, the web interface provided by ELK.

*Important foreword: this is not an "official" or "recommended"
setup; it is just an example. We used ELK in this demo because
it's a popular setup and we keep being asked about it; but you
will have equal success with Fluent or other logging stacks!*

---

## What's in an ELK stack?

- ELK is three components:

  - ElasticSearch (to store and index log entries)

  - Logstash (to receive log entries from various
    sources, process them, and forward them to various
    destinations)

  - Kibana (to view/search log entries with a nice UI)

- The only component that we will configure is Logstash

- We will accept log entries using the GELF protocol

- Log entries will be stored in ElasticSearch,
  <br/>and displayed on Logstash's stdout for debugging

---

## Running ELK

- We are going to use a Compose file describing the ELK stack.

```bash
$ cd ~/container.training/stacks
$ docker-compose -f elk.yml up -d
```

- Let's have a look at the Compose file while it's deploying.

---

## Our basic ELK deployment

- We are using images from the Docker Hub: `elasticsearch`, `logstash`, `kibana`.

- We don't need to change the configuration of ElasticSearch.

- We need to tell Kibana the address of ElasticSearch:

  - it is set with the `ELASTICSEARCH_URL` environment variable,

  - by default it is `localhost:9200`, we change it to `elasticsearch:9200`.

- We need to configure Logstash:

  - we pass the entire configuration file through command-line arguments,

  - this is a hack so that we don't have to create an image just for the config.

---

## Sending logs to ELK

- The ELK stack accepts log messages through a GELF socket.

- The GELF socket listens on UDP port 12201.

- To send a message, we need to change the logging driver used by Docker.

- This can be done globally (by reconfiguring the Engine) or on a per-container basis.

- Let's override the logging driver for a single container:

```bash
$ docker run --log-driver=gelf --log-opt=gelf-address=udp://localhost:12201 \
    alpine echo hello world
```

---

## Viewing the logs in ELK

- Connect to the Kibana interface.

- It is exposed on port 5601.

- Browse http://X.X.X.X:5601.

---

## "Configuring" Kibana

- Kibana should offer you to "Configure an index pattern":
  <br/>in the "Time-field name" drop down, select "@timestamp", and hit the
  "Create" button.

- Then:

  - click "Discover" (in the top-left corner),
  - click "Last 15 minutes" (in the top-right corner),
  - click "Last 1 hour" (in the list in the middle),
  - click "Auto-refresh" (top-right corner),
  - click "5 seconds" (top-left of the list).

- You should see a series of green bars (with one new green bar every minute).

- Our 'hello world' message should be visible there.

---

## Important afterword

**This is not a "production-grade" setup.**

It is just an educational example. Since we have only
one node, we set up a single
ElasticSearch instance and a single Logstash instance.

In a production setup, you need an ElasticSearch cluster
(both for capacity and availability reasons). You also
need multiple Logstash instances.

And if you want to withstand
bursts of logs, you need some kind of message queue:
Redis if you're cheap, Kafka if you want to make sure
that you don't drop messages on the floor. Good luck.

If you want to learn more about the GELF driver,
have a look at [this blog post](
http://jpetazzo.github.io/2017/01/20/docker-logging-gelf/).
295
slides/containers/Multi_Stage_Builds.md
Normal file
@@ -0,0 +1,295 @@

# Reducing image size

* In the previous example, our final image contained:

  * our `hello` program

  * its source code

  * the compiler

* Only the first one is strictly necessary.

* We are going to see how to obtain an image without the superfluous components.

---

## Can't we remove superfluous files with `RUN`?

What happens if we do one of the following commands?

- `RUN rm -rf ...`

- `RUN apt-get remove ...`

- `RUN make clean ...`

--

This adds a layer which removes a bunch of files.

But the previous layers (which added the files) still exist.

---

## Removing files with an extra layer

When downloading an image, all the layers must be downloaded.

| Dockerfile instruction | Layer size | Image size |
| ---------------------- | ---------- | ---------- |
| `FROM ubuntu` | Size of base image | Size of base image |
| `...` | ... | Sum of this layer <br/>+ all previous ones |
| `RUN apt-get install somepackage` | Size of files added <br/>(e.g. a few MB) | Sum of this layer <br/>+ all previous ones |
| `...` | ... | Sum of this layer <br/>+ all previous ones |
| `RUN apt-get remove somepackage` | Almost zero <br/>(just metadata) | Same as previous one |

Therefore, `RUN rm` does not reduce the size of the image or free up disk space.
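
We can see this directly with `docker history`, which prints the size of each layer. A quick sketch (the `rm-demo` tag and throwaway Dockerfile are illustrative):

```bash
# Build a throwaway image that installs and then removes a package.
cat >Dockerfile.demo <<'EOF'
FROM ubuntu
RUN apt-get update && apt-get install -y wget
RUN apt-get remove -y wget
EOF
docker build -t rm-demo -f Dockerfile.demo .

# Each line is a layer with its size; the 'remove' layer is ~0B,
# but the 'install' layer (and its megabytes) is still part of the image.
docker history rm-demo
```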
|
||||
|
||||
---
|
||||
|
||||
## Removing unnecessary files
|
||||
|
||||
Various techniques are available to obtain smaller images:
|
||||
|
||||
- collapsing layers,
|
||||
|
||||
- adding binaries that are built outside of the Dockerfile,
|
||||
|
||||
- squashing the final image,
|
||||
|
||||
- multi-stage builds.
|
||||
|
||||
Let's review them quickly.
|
||||
|
||||
---
|
||||
|
||||
## Collapsing layers
|
||||
|
||||
You will frequently see Dockerfiles like this:
|
||||
|
||||
```dockerfile
|
||||
FROM ubuntu
|
||||
RUN apt-get update && apt-get install xxx && ... && apt-get remove xxx && ...
|
||||
```
|
||||
|
||||
Or the (more readable) variant:
|
||||
|
||||
```dockerfile
|
||||
FROM ubuntu
|
||||
RUN apt-get update \
|
||||
&& apt-get install xxx \
|
||||
&& ... \
|
||||
&& apt-get remove xxx \
|
||||
&& ...
|
||||
```
|
||||
|
||||
This `RUN` command gives us a single layer.
|
||||
|
||||
The files that are added, then removed in the same layer, do not grow the layer size.
|
||||
|
||||
---
|
||||
|
||||
## Collapsing layers: pros and cons
|
||||
|
||||
Pros:
|
||||
|
||||
- works on all versions of Docker
|
||||
|
||||
- doesn't require extra tools
|
||||
|
||||
Cons:
|
||||
|
||||
- not very readable
|
||||
|
||||
- some unnecessary files might still remain if the cleanup is not thorough
|
||||
|
||||
- that layer is expensive (slow to build)
|
||||
|
||||
---
|
||||
|
||||
## Building binaries outside of the Dockerfile
|
||||
|
||||
This results in a Dockerfile looking like this:
|
||||
|
||||
```dockerfile
|
||||
FROM ubuntu
|
||||
COPY xxx /usr/local/bin
|
||||
```
|
||||
|
||||
Of course, this implies that the file `xxx` exists in the build context.
|
||||
|
||||
That file has to exist before you can run `docker build`.
|
||||
|
||||
For instance, it can:
|
||||
|
||||
- exist in the code repository,
|
||||
- be created by another tool (script, Makefile...),
|
||||
- be created by another container image and extracted from the image.
|
||||
|
||||
See for instance the [busybox official image](https://github.com/docker-library/busybox/blob/fe634680e32659aaf0ee0594805f74f332619a90/musl/Dockerfile) or this [older busybox image](https://github.com/jpetazzo/docker-busybox).
|
||||
|
||||
---
|
||||
|
||||
## Building binaries outside: pros and cons
|
||||
|
||||
Pros:
|
||||
|
||||
- final image can be very small
|
||||
|
||||
Cons:
|
||||
|
||||
- requires an extra build tool
|
||||
|
||||
- we're back in dependency hell and "works on my machine"
|
||||
|
||||
Cons, if binary is added to code repository:
|
||||
|
||||
- breaks portability across different platforms
|
||||
|
||||
- grows repository size a lot if the binary is updated frequently
|
||||
|
||||
---
|
||||
|
||||
## Squashing the final image
|
||||
|
||||
The idea is to transform the final image into a single-layer image.
|
||||
|
||||
This can be done in (at least) two ways.
|
||||
|
||||
- Activate experimental features and squash the final image:
|
||||
```bash
|
||||
docker image build --squash ...
|
||||
```
|
||||
|
||||
- Export/import the final image.
|
||||
```bash
|
||||
docker build -t temp-image .
|
||||
docker run --entrypoint true --name temp-container temp-image
|
||||
docker export temp-container | docker import - final-image
|
||||
docker rm temp-container
|
||||
docker rmi temp-image
|
||||
```
|
||||
|
||||

---

## Squashing the image: pros and cons

Pros:

- single-layer images are smaller and faster to download

- removed files no longer take up storage and network resources

Cons:

- we still need to actively remove unnecessary files

- squash operation can take a lot of time (on big images)

- squash operation does not benefit from cache
  <br/>
  (even if we change just a tiny file, the whole image needs to be re-squashed)

---

## Multi-stage builds

Multi-stage builds allow us to have multiple *stages*.

Each stage is a separate image, and can copy files from previous stages.

We're going to see how they work in more detail.

---

# Multi-stage builds

* At any point in our `Dockerfile`, we can add a new `FROM` line.

* This line starts a new stage of our build.

* Each stage can access the files of the previous stages with `COPY --from=...`.

* When a build is tagged (with `docker build -t ...`), the last stage is tagged.

* Previous stages are not discarded: they will be used for caching, and can be referenced.

---

## Multi-stage builds in practice

* Each stage is numbered, starting at `0`

* We can copy a file from a previous stage by indicating its number, e.g.:

  ```dockerfile
  COPY --from=0 /file/from/first/stage /location/in/current/stage
  ```

* We can also name stages, and reference these names:

  ```dockerfile
  FROM golang AS builder
  RUN ...
  FROM alpine
  COPY --from=builder /go/bin/mylittlebinary /usr/local/bin/
  ```

---

## Multi-stage builds for our C program

We will change our Dockerfile to:

* give a nickname to the first stage: `compiler`

* add a second stage using the same `ubuntu` base image

* add the `hello` binary to the second stage

* make sure that `CMD` is in the second stage

The resulting Dockerfile is on the next slide.

---

## Multi-stage build `Dockerfile`

Here is the final Dockerfile:

```dockerfile
FROM ubuntu AS compiler
RUN apt-get update
RUN apt-get install -y build-essential
COPY hello.c /
RUN make hello
FROM ubuntu
COPY --from=compiler /hello /hello
CMD /hello
```

Let's build it, and check that it works correctly:

```bash
docker build -t hellomultistage .
docker run hellomultistage
```

---

## Comparing single/multi-stage build image sizes

List our images with `docker images`, and check the size of:

- the `ubuntu` base image,

- the single-stage `hello` image,

- the multi-stage `hellomultistage` image.

We can achieve even smaller images if we use smaller base images.

However, if we use common base images (e.g. if we standardize on `ubuntu`),
these common images will be pulled only once per node, so they are
virtually "free."
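
For instance, a quick way to list just these images side by side (a sketch; adjust the names if you tagged yours differently):

```bash
for img in ubuntu hello hellomultistage; do
  docker images --format "{{.Repository}}:{{.Tag}} {{.Size}}" $img
done
```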

slides/containers/Namespaces_Cgroups.md: new file, 1124 lines (diff suppressed because it is too large)
@@ -3,7 +3,7 @@ class: title

# Naming and inspecting containers

![old image](nothing-to-hide.png)
![new image](magnifying-glass.png)

---

@@ -85,16 +85,8 @@ The `docker inspect` command will output a very detailed JSON map.

```bash
$ docker inspect <containerID>
[{
  "AppArmorProfile": "",
  "Args": [],
  "Config": {
    "AttachStderr": true,
    "AttachStdin": false,
    "AttachStdout": true,
    "Cmd": [
      "bash"
    ],
    "CpuShares": 0,
...
(many pages of JSON here)
...
```

slides/containers/Network_Drivers.md: new file, 84 lines
@@ -0,0 +1,84 @@
# Container network drivers

The Docker Engine supports many different network drivers.

The built-in drivers include:

* `bridge` (default)

* `none`

* `host`

* `container`

The driver is selected with `docker run --net ...`.

The different drivers are explained in more detail on the following slides.

---

## The default bridge

* By default, the container gets a virtual `eth0` interface.
  <br/>(In addition to its own private `lo` loopback interface.)

* That interface is provided by a `veth` pair.

* It is connected to the Docker bridge.
  <br/>(Named `docker0` by default; configurable with `--bridge`.)

* Addresses are allocated on a private, internal subnet.
  <br/>(Docker uses 172.17.0.0/16 by default; configurable with `--bip`.)

* Outbound traffic goes through an iptables MASQUERADE rule.

* Inbound traffic goes through an iptables DNAT rule (see the example below).

* The container can have its own routes, iptables rules, etc.
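
A short illustration of the inbound path (a sketch; `DOCKER` is the chain where the Docker Engine installs its DNAT rules, and inspecting it requires root on the host):

```bash
# Publish container port 80 on host port 8080
docker run -d -p 8080:80 nginx
# Show the DNAT rule that was created on the host
sudo iptables -t nat -L DOCKER -n
```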

---

## The null driver

* Container is started with `docker run --net none ...`

* It only gets the `lo` loopback interface. No `eth0`.

* It can't send or receive network traffic.

* Useful for isolated/untrusted workloads.
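
To see it for yourself (a sketch; `ip addr` here comes from BusyBox in the `alpine` image):

```bash
# Only the loopback interface shows up; there is no eth0
docker run --rm --net none alpine ip addr
```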

---

## The host driver

* Container is started with `docker run --net host ...`

* It sees (and can access) the network interfaces of the host.

* It can bind any address, any port (for ill and for good).

* Network traffic doesn't have to go through NAT, bridge, or veth.

* Performance = native!

Use cases:

* Performance sensitive applications (VOIP, gaming, streaming...)

* Peer discovery (e.g. Erlang port mapper, Raft, Serf...)

---

## The container driver

* Container is started with `docker run --net container:id ...`

* It re-uses the network stack of another container.

* It shares with this other container the same interfaces, IP address(es), routes, iptables rules, etc.

* Those containers can communicate over their `lo` interface.
  <br/>(i.e. one can bind to 127.0.0.1 and the others can connect to it.)
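
A minimal sketch of two containers sharing one network stack (assuming the official `nginx` and `alpine` images):

```bash
# Start a web server; it listens on port 80 inside its network namespace
docker run -d --name web nginx
# A second container joins the same network stack and reaches nginx over lo
docker run --rm --net container:web alpine wget -qO- localhost:80
```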

slides/containers/Orchestration_Overview.md: new file, 422 lines
@@ -0,0 +1,422 @@
# Orchestration, an overview

In this chapter, we will:

* Explain what orchestration is and why we would need it.

* Present (from a high-level perspective) some orchestrators.

* Show one orchestrator (Kubernetes) in action.

---

class: pic

## What's orchestration?

![Joana Carneiro (orchestra conductor)](images/conductor.jpg)

---

## What's orchestration?

According to Wikipedia:

*Orchestration describes the __automated__ arrangement,
coordination, and management of complex computer systems,
middleware, and services.*

--

*[...] orchestration is often discussed in the context of
__service-oriented architecture__, __virtualization__, provisioning,
Converged Infrastructure and __dynamic datacenter__ topics.*

--

What does that really mean?

---

## Example 1: dynamic cloud instances

--

- Q: do we always use 100% of our servers?

--

- A: obviously not!

.center[![Daily variations of traffic](images/traffic-graph.png)]

---

## Example 1: dynamic cloud instances

- Every night, scale down

  (by shutting down extraneous replicated instances)

- Every morning, scale up

  (by deploying new copies)

- "Pay for what you use"

  (i.e. save big $$$ here)

---

## Example 1: dynamic cloud instances

How do we implement this?

- Crontab

- Autoscaling (save even bigger $$$)

That's *relatively* easy.

Now, how are things for our IAAS provider?

---

## Example 2: dynamic datacenter

- Q: what's the #1 cost in a datacenter?

--

- A: electricity!

--

- Q: what uses electricity?

--

- A: servers, obviously

- A: ... and associated cooling

--

- Q: do we always use 100% of our servers?

--

- A: obviously not!

---

## Example 2: dynamic datacenter

- If only we could turn off unused servers during the night...

- Problem: we can only turn off a server if it's totally empty!

  (i.e. all VMs on it are stopped/moved)

- Solution: *migrate* VMs and shut down empty servers

  (e.g. combine two hypervisors with 40% load into 80%+0%,
  <br/>and shut down the one at 0%)

---

## Example 2: dynamic datacenter

How do we implement this?

- Shut down empty hosts (but keep some spare capacity)

- Start hosts again when capacity gets low

- Ability to "live migrate" VMs

  (Xen already did this 10+ years ago)

- Rebalance VMs on a regular basis

  - what if a VM is stopped while we move it?
  - should we allow provisioning on hosts involved in a migration?

*Scheduling* becomes more complex.

---

## What is scheduling?

According to Wikipedia (again):

*In computing, scheduling is the method by which threads,
processes or data flows are given access to system resources.*

The scheduler is concerned mainly with:

- throughput (total amount of work done per time unit);
- turnaround time (between submission and completion);
- response time (between submission and start);
- waiting time (between job readiness and execution);
- fairness (appropriate times according to priorities).

In practice, these goals often conflict.

**"Scheduling" = decide which resources to use.**

---

## Exercise 1

- You have:

  - 5 hypervisors (physical machines)

- Each server has:

  - 16 GB RAM, 8 cores, 1 TB disk

- Each week, your team asks:

  - one VM with X RAM, Y CPU, Z disk

Scheduling = deciding which hypervisor to use for each VM.

Difficulty: easy!

---

<!-- Warning, two almost identical slides (for img effect) -->

## Exercise 2

- You have:

  - 1000+ hypervisors (and counting!)

- Each server has different resources:

  - 8-500 GB of RAM, 4-64 cores, 1-100 TB disk

- Multiple times a day, a different team asks for:

  - up to 50 VMs with different characteristics

Scheduling = deciding which hypervisor to use for each VM.

Difficulty: ???

---

<!-- Warning, two almost identical slides (for img effect) -->

## Exercise 2

- You have:

  - 1000+ hypervisors (and counting!)

- Each server has different resources:

  - 8-500 GB of RAM, 4-64 cores, 1-100 TB disk

- Multiple times a day, a different team asks for:

  - up to 50 VMs with different characteristics

Scheduling = deciding which hypervisor to use for each VM.

![Troll face](images/trollface.png)

---

## Exercise 3

- You have machines (physical and/or virtual)

- You have containers

- You are trying to put the containers on the machines

- Sounds familiar?

---

## Scheduling with one resource

.center[![Not-so-good bin packing](images/binpacking-1d-1.gif)]

Can we do better?

---

## Scheduling with one resource

.center[![Better bin packing](images/binpacking-1d-2.gif)]

Yup!

---

## Scheduling with two resources

.center[![2D bin packing](images/binpacking-2d.gif)]

---

## Scheduling with three resources

.center[![3D bin packing](images/binpacking-3d.gif)]

---

## You need to be good at this

.center[![Tangram](images/tangram.gif)]

---

## But also, you must be quick!

.center[![Tetris](images/tetris-1.png)]

---

## And be web scale!

.center[![Big tetris](images/tetris-2.gif)]

---

## And think outside (?) of the box!

.center[![3D tetris](images/tetris-3.png)]

---

## Good luck!

.center[![FUUUUUU face](images/fu-face.jpg)]

---

## TL,DR

* Scheduling with multiple resources (dimensions) is hard.

* Don't expect to solve the problem with a Tiny Shell Script.

* There are literally tons of research papers written on this.

---

## But our orchestrator also needs to manage ...

* Network connectivity (or filtering) between containers.

* Load balancing (external and internal).

* Failure recovery (if a node or a whole datacenter fails).

* Rolling out new versions of our applications.

  (Canary deployments, blue/green deployments...)

---

## Some orchestrators

We are going to briefly present a few orchestrators.

There is no "absolute best" orchestrator.

It depends on:

- your applications,

- your requirements,

- your pre-existing skills...

---

## Nomad

- Open Source project by HashiCorp.

- Arbitrary scheduler (not just for containers).

- Great if you want to schedule mixed workloads.

  (VMs, containers, processes...)

- Less integration with the rest of the container ecosystem.

---

## Mesos

- Open Source project in the Apache Foundation.

- Arbitrary scheduler (not just for containers).

- Two-level scheduler.

- Top-level scheduler acts as a resource broker.

- Second-level schedulers (aka "frameworks") obtain resources from the top-level one.

- Frameworks implement various strategies.

  (Marathon = long running processes; Chronos = run at intervals; ...)

- Commercial offering through DC/OS by Mesosphere.

---

## Rancher

- Rancher 1 offered a simple interface for Docker hosts.

- Rancher 2 is a complete management platform for Docker and Kubernetes.

- Technically not an orchestrator, but it's a popular option.

---

## Swarm

- Tightly integrated with the Docker Engine.

- Extremely simple to deploy and set up, even in multi-manager (HA) mode.

- Secure by default.

- Strongly opinionated:

  - smaller set of features,

  - easier to operate.

---

## Kubernetes

- Open Source project initiated by Google.

- Contributions from many other actors.

- *De facto* standard for container orchestration.

- Many deployment options; some of them very complex.

- Reputation: steep learning curve.

- Reality:

  - true, if we try to understand *everything*;

  - false, if we focus on what matters.

slides/containers/Publishing_To_Docker_Hub.md: new file, 102 lines
@@ -0,0 +1,102 @@
# Publishing images to the Docker Hub

We have built our first images.

We can now publish them to the Docker Hub!

*You don't have to do the exercises in this section,
because they require an account on the Docker Hub, and we
don't want to force anyone to create one.*

*Note, however, that creating an account on the Docker Hub
is free (and doesn't require a credit card), and hosting
public images is free as well.*

---

## Logging into our Docker Hub account

* This can be done from the Docker CLI:
  ```bash
  docker login
  ```

.warning[When running Docker for Mac/Windows, or
Docker on a Linux workstation, it can (and will when
possible) integrate with your system's keyring to
store your credentials securely. However, on most Linux
servers, it will store your credentials in `~/.docker/config.json`.]

---

## Image tags and registry addresses

* Docker image tags are like Git tags and branches.

* They are like *bookmarks* pointing at a specific image ID.

* Tagging an image doesn't *rename* an image: it adds another tag.

* When pushing an image to a registry, the registry address is in the tag.

  Example: `registry.example.net:5000/image`

* What about Docker Hub images?

--

* `jpetazzo/clock` is, in fact, `index.docker.io/jpetazzo/clock`

* `ubuntu` is, in fact, `library/ubuntu`, i.e. `index.docker.io/library/ubuntu`
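
For example, pushing to a self-hosted registry would look like this (a sketch, reusing the hypothetical `registry.example.net:5000` address from above):

```bash
# The registry address becomes part of the image name
docker tag figlet registry.example.net:5000/figlet
docker push registry.example.net:5000/figlet
```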

---

## Tagging an image to push it on the Hub

* Let's tag our `figlet` image (or any other to our liking):
  ```bash
  docker tag figlet jpetazzo/figlet
  ```

* And push it to the Hub:
  ```bash
  docker push jpetazzo/figlet
  ```

* That's it!

--

* Anybody can now `docker run jpetazzo/figlet` anywhere.

---

## The goodness of automated builds

* You can link a Docker Hub repository with a GitHub or BitBucket repository

* Each push to GitHub or BitBucket will trigger a build on Docker Hub

* If the build succeeds, the new image is available on Docker Hub

* You can map tags and branches between source and container images

* If you work with public repositories, this is free

---

class: extra-details

## Setting up an automated build

* We need a Dockerized repository!
* Let's go to https://github.com/jpetazzo/trainingwheels and fork it.
* Go to the Docker Hub (https://hub.docker.com/).
* Select "Create" in the top-right bar, and select "Create Automated Build."
* Connect your Docker Hub account to your GitHub account.
* Select your user and the repository that we just forked.
* Create.
* Then go to "Build Settings."
* Put `/www` in "Dockerfile Location" (or whichever directory the Dockerfile is in).
* Click "Trigger" to build the repository immediately (without waiting for a git push).
* Subsequent builds will happen automatically, thanks to GitHub hooks.

slides/containers/Resource_Limits.md: new file, 229 lines
@@ -0,0 +1,229 @@
# Limiting resources

- So far, we have used containers as convenient units of deployment.

- What happens when a container tries to use more resources than available?

  (RAM, CPU, disk usage, disk and network I/O...)

- What happens when multiple containers compete for the same resource?

- Can we limit resources available to a container?

  (Spoiler alert: yes!)

---

## Container processes are normal processes

- Containers are closer to "fancy processes" than to "lightweight VMs".

- A process running in a container is, in fact, a process running on the host.

- Let's look at the output of `ps` on a container host running 3 containers:

  ```
    0  2662  0.2  0.3 /usr/bin/dockerd -H fd://
    0  2766  0.1  0.1  \_ docker-containerd --config /var/run/docker/containe
    0 23479  0.0  0.0      \_ docker-containerd-shim -namespace moby -workdir
    0 23497  0.0  0.0      |   \_ `nginx`: master process nginx -g daemon off;
  101 23543  0.0  0.0      |       \_ `nginx`: worker process
    0 23565  0.0  0.0      \_ docker-containerd-shim -namespace moby -workdir
  102 23584  9.4 11.3      |   \_ `/docker-java-home/jre/bin/java` -Xms2g -Xmx2
    0 23707  0.0  0.0      \_ docker-containerd-shim -namespace moby -workdir
    0 23725  0.0  0.0          \_ `/bin/sh`
  ```

- The highlighted processes are containerized processes.
  <br/>
  (That host is running nginx, elasticsearch, and alpine.)

---

## By default: nothing changes

- What happens when a process uses too much memory on a Linux system?

--

- Simplified answer:

  - swap is used (if available);

  - if there is not enough swap space, eventually, the out-of-memory killer is invoked;

  - the OOM killer uses heuristics to kill processes;

  - sometimes, it kills an unrelated process.

--

- What happens when a container uses too much memory?

- The same thing!

  (i.e., a process eventually gets killed, possibly in another container.)

---

## Limiting container resources

- The Linux kernel offers rich mechanisms to limit container resources.

- For memory usage, the mechanism is part of the *cgroup* subsystem.

- This subsystem makes it possible to limit the memory for a process or a group of processes.

- A container engine leverages these mechanisms to limit memory for a container.

- The out-of-memory killer has a new behavior:

  - it runs when a container exceeds its allowed memory usage,

  - in that case, it only kills processes in that container.

---

## Limiting memory in practice

- The Docker Engine offers multiple flags to limit memory usage.

- The two most useful ones are `--memory` and `--memory-swap`.

- `--memory` limits the amount of physical RAM used by a container.

- `--memory-swap` limits the total amount (RAM+swap) used by a container.

- The memory limit can be expressed in bytes, or with a unit suffix.

  (e.g.: `--memory 100m` = 100 megabytes.)

- We will see two strategies: limiting RAM usage, or limiting both.

---

## Limiting RAM usage

Example:

```bash
docker run -ti --memory 100m python
```

If the container tries to use more than 100 MB of RAM, *and* swap is available:

- the container will not be killed,

- memory above 100 MB will be swapped out,

- in most cases, the app in the container will be slowed down (a lot).

If we run out of swap, the global OOM killer still intervenes.

---

## Limiting both RAM and swap usage

Example:

```bash
docker run -ti --memory 100m --memory-swap 100m python
```

If the container tries to use more than 100 MB of memory, it is killed.

On the other hand, the application will never be slowed down because of swap.

---

## When to pick which strategy?

- Stateful services (like databases) will lose or corrupt data when killed.

- Allow them to use swap space, but monitor swap usage.

- Stateless services can usually be killed with little impact.

- Limit their mem+swap usage, but monitor if they get killed.

- Ultimately, this is no different from "do I want swap, and how much?"

---

## Limiting CPU usage

- There are no fewer than 3 ways to limit CPU usage:

  - setting a relative priority with `--cpu-shares`,

  - setting a CPU% limit with `--cpus`,

  - pinning a container to specific CPUs with `--cpuset-cpus`.

- They can be used separately or together (see the combined example a few slides down).

---

## Setting relative priority

- Each container has a relative priority used by the Linux scheduler.

- By default, this priority is 1024.

- As long as CPU usage is not maxed out, this has no effect.

- When CPU usage is maxed out, each container receives CPU cycles in proportion to its relative priority.

- In other words: a container with `--cpu-shares 2048` will receive twice as much as the default.
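
A sketch to observe this (assuming the classic `progrium/stress` image, which busy-loops on demand; any CPU-intensive image works):

```bash
# Under contention, the first container gets roughly twice the CPU time
docker run -d --name high --cpu-shares 2048 progrium/stress --cpu 1
docker run -d --name low --cpu-shares 1024 progrium/stress --cpu 1
docker stats --no-stream high low
```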

---

## Setting a CPU% limit

- This setting will make sure that a container doesn't use more than a given % of CPU.

- The value is expressed in CPUs; therefore:

  `--cpus 0.1` means 10% of one CPU,

  `--cpus 1.0` means 100% of one whole CPU,

  `--cpus 10.0` means 10 entire CPUs.

---

## Pinning containers to CPUs

- On multi-core machines, it is possible to restrict the execution on a set of CPUs.

- Examples:

  `--cpuset-cpus 0` forces the container to run on CPU 0;

  `--cpuset-cpus 3,5,7` restricts the container to CPUs 3, 5, 7;

  `--cpuset-cpus 0-3,8-11` restricts the container to CPUs 0, 1, 2, 3, 8, 9, 10, 11.

- This will not reserve the corresponding CPUs!

  (They might still be used by other containers, or uncontainerized processes.)
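
These flags compose; for instance (again assuming the `progrium/stress` image):

```bash
# Pin to CPUs 0 and 1, with a higher relative priority on those CPUs
docker run -d --cpuset-cpus 0-1 --cpu-shares 2048 progrium/stress --cpu 2
```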

---

## Limiting disk usage

- Most storage drivers do not support limiting the disk usage of containers.

  (With the exception of devicemapper, but the limit cannot be set easily.)

- This means that a single container could exhaust disk space for everyone.

- In practice, however, this is not a concern, because:

  - data files (for stateful services) should reside on volumes,

  - assets (e.g. images, user-generated content...) should reside on object stores or on volumes,

  - logs are written on standard output and gathered by the container engine.

- Container disk usage can be audited with `docker ps -s` and `docker diff`.

slides/containers/Training_Environment.md: new file, 126 lines
@@ -0,0 +1,126 @@
class: title

# Our training environment

![SSH terminal](images/title-our-training-environment.jpg)

---

## Our training environment

- If you are attending a tutorial or workshop:

  - a VM has been provisioned for each student

- If you are doing or re-doing this course on your own, you can:

  - install Docker locally (as explained in the chapter "Installing Docker")

  - install Docker on e.g. a cloud VM

  - use http://www.play-with-docker.com/ to instantly get a training environment

---

## Our Docker VM

*This section assumes that you are following this course as part of
a tutorial, training or workshop, where each student is given an
individual Docker VM.*

- The VM is created just before the training.

- It will stay up during the whole training.

- It will be destroyed shortly after the training.

- It comes pre-loaded with Docker and some other useful tools.

---

## What *is* Docker?

- "Installing Docker" really means "Installing the Docker Engine and CLI".

- The Docker Engine is a daemon (a service running in the background).

- This daemon manages containers, the same way that a hypervisor manages VMs.

- We interact with the Docker Engine by using the Docker CLI.

- The Docker CLI and the Docker Engine communicate through an API.

- There are many other programs, and many client libraries, to use that API.

---

## Why don't we run Docker locally?

- We are going to download container images and distribution packages.

- This could put a bit of stress on the local WiFi and slow us down.

- Instead, we use a remote VM that has good connectivity.

- In some rare cases, installing Docker locally is challenging:

  - no administrator/root access (computer managed by strict corp IT)

  - 32-bit CPU or OS

  - old OS version (e.g. CentOS 6, OSX pre-Yosemite, Windows 7)

- It's better to spend time learning containers than fiddling with the installer!

---

## Connecting to your Virtual Machine

You need an SSH client.

* On OS X, Linux, and other UNIX systems, just use `ssh`:

  ```bash
  $ ssh <login>@<ip-address>
  ```

* On Windows, if you don't have an SSH client, you can download:

  * Putty (www.putty.org)

  * Git BASH (https://git-for-windows.github.io/)

  * MobaXterm (http://mobaxterm.mobatek.net)

---

## Checking your Virtual Machine

Once logged in, make sure that you can run a basic Docker command:

.small[
```bash
$ docker version
Client:
 Version:      18.03.0-ce
 API version:  1.37
 Go version:   go1.9.4
 Git commit:   0520e24
 Built:        Wed Mar 21 23:10:06 2018
 OS/Arch:      linux/amd64
 Experimental: false
 Orchestrator: swarm

Server:
 Engine:
  Version:      18.03.0-ce
  API version:  1.37 (minimum version 1.12)
  Go version:   go1.9.4
  Git commit:   0520e24
  Built:        Wed Mar 21 23:08:35 2018
  OS/Arch:      linux/amd64
  Experimental: false
```
]

If this doesn't work, raise your hand so that an instructor can assist you!

@@ -1,9 +1,9 @@

class: title

# Working with Volumes
# Working with volumes

![old image](volumes.jpg)
![new image](containers-by-the-water.jpg)

---

@@ -19,7 +19,7 @@ At the end of this section, you will be able to:

---

## Working with Volumes
## Working with volumes

Docker volumes can be used to achieve many things, including:

@@ -33,6 +33,8 @@ Docker volumes can be used to achieve many things, including:

* Sharing a *single file* between the host and a container.

* Using remote storage and custom storage with "volume drivers".

---

## Volumes are special directories in a container

@@ -95,12 +97,12 @@ We will see an example in the following slides.

class: extra-details

## Sharing web application logs with another container
## Sharing app server logs with another container

Let's start a Tomcat container:

```bash
$ docker run --name webapp -d -p 8080:8080 -v /usr/local/tomcat/logs
$ docker run --name webapp -d -p 8080:8080 -v /usr/local/tomcat/logs tomcat
```

Now, start an `alpine` container accessing the same volume:

@@ -118,7 +120,7 @@ $ curl localhost:8080

## Volumes exist independently of containers

If a container is stopped, its volumes still exist and are available.
If a container is stopped or removed, its volumes still exist and are available.

Volumes can be listed and manipulated with `docker volume` subcommands:

@@ -201,7 +203,7 @@ Then run `curl localhost:1234` again to see your changes.

---

## Managing volumes explicitly
## Using custom "bind-mounts"

In some cases, you want a specific directory on the host to be mapped
inside the container:

@@ -244,6 +246,8 @@ of an existing container.

* Newer containers can use `--volumes-from` too.

* Doesn't work across servers, so not usable in clusters (Swarm, Kubernetes).

---

class: extra-details

@@ -259,7 +263,7 @@ $ docker run -d --name redis28 redis:2.8

Connect to the Redis container and set some data.

```bash
$ docker run -ti --link redis28:redis alpine telnet redis 6379
$ docker run -ti --link redis28:redis busybox telnet redis 6379
```

Issue the following commands:

@@ -298,7 +302,7 @@ class: extra-details

Connect to the Redis container and see our data.

```bash
docker run -ti --link redis30:redis alpine telnet redis 6379
docker run -ti --link redis30:redis busybox telnet redis 6379
```

Issue a few commands.

@@ -311,9 +315,9 @@ QUIT

---

## What happens when you remove containers with volumes?
## Volumes lifecycle

* Volumes are kept around.
* When you remove a container, its volumes are kept around.

* You can list them with `docker volume ls`.

@@ -371,9 +375,9 @@ $ docker inspect <yourContainerID>

---

## Sharing a single file between the host and a container
## Sharing a single file

The same `-v` flag can be used to share a single file.
The same `-v` flag can be used to share a single file (instead of a directory).

One of the most interesting examples is to share the Docker control socket.

@@ -381,8 +385,11 @@ One of the most interesting examples is to share the Docker control socket.

$ docker run -it -v /var/run/docker.sock:/var/run/docker.sock docker sh
```

Warning: when using such mounts, the container gains root-like access to the host.
It can potentially do bad things.
From that container, you can now run `docker` commands communicating with
the Docker Engine running on the host. Try `docker ps`!

.warning[Since that container has access to the Docker socket, it
has root-like access to the host.]

@@ -391,10 +398,56 @@ It can potentially do bad things.

You can install plugins to manage volumes backed by particular storage systems,
or providing extra features. For instance:

* [dvol](https://github.com/ClusterHQ/dvol) - allows to commit/branch/rollback volumes;
* [Flocker](https://clusterhq.com/flocker/introduction/), [REX-Ray](https://github.com/emccode/rexray) - create and manage volumes backed by an enterprise storage system (e.g. SAN or NAS), or by cloud block stores (e.g. EBS);
* [Blockbridge](http://www.blockbridge.com/), [Portworx](http://portworx.com/) - provide distributed block store for containers;
* and much more!
* [REX-Ray](https://rexray.io/) - create and manage volumes backed by an enterprise storage system (e.g.
  SAN or NAS), or by cloud block stores (e.g. EBS, EFS).

* [Portworx](http://portworx.com/) - provides distributed block store for containers.

* [Gluster](https://www.gluster.org/) - open source software-defined distributed storage that can scale
  to several petabytes. It provides interfaces for object, block and file storage.

* and much more at the [Docker Store](https://store.docker.com/search?category=volume&q=&type=plugin)!

---

## Volumes vs. Mounts

* Since Docker 17.06, a new option is available: `--mount`.

* It offers a new, richer syntax to manipulate data in containers.

* It makes an explicit difference between:

  - volumes (identified with a unique name, managed by a storage plugin),

  - bind mounts (identified with a host path, not managed).

* The former `-v` / `--volume` option is still usable.

---

## `--mount` syntax

Binding a host path to a container path:

```bash
$ docker run \
  --mount type=bind,source=/path/on/host,target=/path/in/container alpine
```

Mounting a volume to a container path:

```bash
$ docker run \
  --mount source=myvolume,target=/path/in/container alpine
```

Mounting a tmpfs (in-memory, for temporary files):

```bash
$ docker run \
  --mount type=tmpfs,destination=/path/in/container,tmpfs-size=1000000 alpine
```

---

slides/containers/intro.md: new file, 39 lines
@@ -0,0 +1,39 @@
## A brief introduction

- This was initially written to support in-person, instructor-led workshops and tutorials

- These materials are maintained by [Jérôme Petazzoni](https://twitter.com/jpetazzo) and [multiple contributors](https://@@GITREPO@@/graphs/contributors)

- You can also follow along on your own, at your own pace

- We included as much information as possible in these slides

- We recommend having a mentor to help you ...

- ... Or be comfortable spending some time reading the Docker
  [documentation](https://docs.docker.com/) ...

- ... And looking for answers in the [Docker forums](https://forums.docker.com),
  [StackOverflow](http://stackoverflow.com/questions/tagged/docker),
  and other outlets

---

class: self-paced

## Hands on, you shall practice

- Nobody ever became a Jedi by spending their lives reading Wookieepedia

- Likewise, it will take more than merely *reading* these slides
  to make you an expert

- These slides include *tons* of exercises and examples

- They assume that you have access to a machine running Docker

- If you are attending a workshop or tutorial:
  <br/>you will be given specific instructions to access a cloud VM

- If you are doing this on your own:
  <br/>we will tell you how to install Docker or access a Docker environment

slides/containers/links.md: new symbolic link, 1 line
@@ -0,0 +1 @@
../swarm/links.md

slides/count-slides.py: new executable file, 57 lines
@@ -0,0 +1,57 @@
#!/usr/bin/env python
import re
import sys

PREFIX = "name: toc-"
EXCLUDED = ["in-person"]

class State(object):
    def __init__(self):
        self.current_slide = 1
        self.section_title = None
        self.section_start = 0
        self.section_slides = 0
        self.chapters = {}
        self.sections = {}
    def show(self):
        # Chapter markers are aggregated at the end; don't list them as sections.
        if self.section_title.startswith("chapter-"):
            return
        print("{0.section_title}\t{0.section_start}\t{0.section_slides}".format(self))
        self.sections[self.section_title] = self.section_slides

state = State()

for line in open(sys.argv[1]):
    line = line.rstrip()
    if line.startswith(PREFIX):
        # A new section starts; report the previous one (or print the header).
        if state.section_title is None:
            print("{}\t{}\t{}".format("title", "index", "size"))
        else:
            state.show()
        state.section_title = line[len(PREFIX):].strip()
        state.section_start = state.current_slide
        state.section_slides = 0
    if line == "---":
        state.current_slide += 1
        state.section_slides += 1
    if line == "--":
        state.current_slide += 1
    toc_links = re.findall(r"\(#toc-(.*)\)", line)
    if toc_links and state.section_title.startswith("chapter-"):
        if state.section_title not in state.chapters:
            state.chapters[state.section_title] = []
        state.chapters[state.section_title].append(toc_links[0])
    # This is really hackish: slides with an excluded class don't count.
    if line.startswith("class:"):
        for klass in EXCLUDED:
            if klass in line:
                state.section_slides -= 1
                state.current_slide -= 1

state.show()

for chapter in sorted(state.chapters, key=lambda f: int(f.split("-")[1])):
    chapter_size = sum(state.sections[s] for s in state.chapters[chapter])
    print("{}\t{}\t{}".format("total size for", chapter, chapter_size))
@@ -1,182 +0,0 @@
chat: "[Slack](https://dockercommunity.slack.com/messages/C7ET1GY4Q)"

exclude:
- self-paced
- snap
- auto-btp
- benchmarking
- elk-manual
- prom-manual

title: "Swarm: from Zero to Hero (DC17EU)"
chapters:
- |
  class: title

  .small[

  Swarm: from Zero to Hero

  .small[.small[

  **Be kind to the WiFi!**

  *Use the 5G network*
  <br/>
  *Don't use your hotspot*
  <br/>
  *Don't stream videos from YouTube, Netflix, etc.
  <br/>(if you're bored, watch local content instead)*

  Also: share the power outlets
  <br/>
  *(with limited power comes limited responsibility?)*
  <br/>
  *(or something?)*

  Thank you!

  ]
  ]
  ]

  ---

  ## Intros

  <!--
  - Hello! We are
    AJ ([@s0ulshake](https://twitter.com/s0ulshake))
    &
    Jérôme ([@jpetazzo](https://twitter.com/jpetazzo))
  -->

  - Hello! We are Jérôme, Lee, Nicholas, and Scott

  <!--
  I am
  Jérôme ([@jpetazzo](https://twitter.com/jpetazzo))
  -->

  --

  - This is our collective Docker knowledge:

  ![bell curve](bell-curve.jpg)

  ---

  ## "From zero to hero"

  --

  - It rhymes, but it's a pretty bad title, to be honest

  --

  - None of you is a "zero"

  --

  - None of us is a "hero"

  --

  - None of us should even try to be a hero

  --

  *The hero syndrome is a phenomenon affecting people who seek heroism or recognition,
  usually by creating a desperate situation which they can resolve.
  This can include unlawful acts, such as arson.
  The phenomenon has been noted to affect civil servants,
  such as firefighters, nurses, police officers, and security guards.*

  (Wikipedia page on [hero syndrome](https://en.wikipedia.org/wiki/Hero_syndrome))

  ---

  ## Agenda

  .small[
  - 09:00-09:10 Hello!
  - 09:10-10:30 Part 1
  - 10:30-11:00 coffee break
  - 11:00-12:30 Part 2
  - 12:30-13:30 lunch break
  - 13:30-15:00 Part 3
  - 15:00-15:30 coffee break
  - 15:30-17:00 Part 4
  - 17:00-18:00 Afterhours and Q&A
  ]

  <!--
  - The tutorial will run from 9:00am to 12:20pm

  - This will be fast-paced, but DON'T PANIC!

  - There will be a coffee break at 10:30am
    <br/>
    (please remind me if I forget about it!)
  -->

  - All the content is publicly available (slides, code samples, scripts)

    Upstream URL: https://github.com/jpetazzo/orchestration-workshop

  - Feel free to interrupt for questions at any time

  - Live feedback, questions, help on [Gitter](chat)

    http://container.training/chat

- swarm/intro.md
- |
  @@TOC@@
- - swarm/prereqs.md
  - swarm/versions.md
  - |
    class: title

    All right!
    <br/>
    We're all set.
    <br/>
    Let's do this.
  - common/sampleapp.md
  - swarm/swarmkit.md
  - swarm/creatingswarm.md
  - swarm/morenodes.md
- - swarm/firstservice.md
  - swarm/ourapponswarm.md
  - swarm/updatingservices.md
  - swarm/healthchecks.md
- - swarm/operatingswarm.md
  - swarm/netshoot.md
  - swarm/ipsec.md
  - swarm/swarmtools.md
  - swarm/security.md
  - swarm/secrets.md
  - swarm/encryptionatrest.md
  - swarm/leastprivilege.md
  - swarm/apiscope.md
- - swarm/logging.md
  - swarm/metrics.md
  - swarm/stateful.md
  - swarm/extratips.md
  - swarm/end.md
- |
  class: title

  That's all folks! <br/> Questions?

  .small[.small[

  Jérôme ([@jpetazzo](https://twitter.com/jpetazzo)) — [@docker](https://twitter.com/docker)

  ]]

  <!--
  Tiffany ([@tiffanyfayj](https://twitter.com/tiffanyfayj))
  AJ ([@s0ulshake](https://twitter.com/s0ulshake))
  -->

slides/find-non-ascii.sh: new executable file, 2 lines
@@ -0,0 +1,2 @@
#!/bin/sh
grep --color=auto -P -n "[^\x00-\x80]" */*.md

slides/images/binpacking-1d-1.gif: new binary file, 9.4 KiB (not shown)
slides/images/binpacking-1d-2.gif: new binary file, 7.8 KiB (not shown)
Some files were not shown because too many files have changed in this diff.