Mirror of https://github.com/jpetazzo/container.training.git
Synced 2026-02-15 01:59:57 +00:00

Comparing commits: gitpod...2022-06-pr (520 commits)
(Commit list omitted: the mirror captured only bare SHA1 values; the author, date, and message columns were empty.)
.gitignore (vendored): 10 changes
@@ -1,9 +1,19 @@
 *.pyc
 *.swp
 *~
+
 prepare-vms/tags
 prepare-vms/infra
 prepare-vms/www
+
+prepare-tf/.terraform*
+prepare-tf/terraform.*
+prepare-tf/stage2/*.tf
+prepare-tf/stage2/kubeconfig.*
+prepare-tf/stage2/.terraform*
+prepare-tf/stage2/terraform.*
+prepare-tf/stage2/externalips.*
+
 slides/*.yml.html
 slides/autopilot/state.yaml
 slides/index.html
@@ -1,2 +1,3 @@
 hostname frr
+ip nht resolve-via-default
 log stdout
@@ -2,30 +2,36 @@ version: "3"
 
 services:
   bgpd:
-    image: ajones17/frr:662
+    image: frrouting/frr:v8.2.2
     volumes:
     - ./conf:/etc/frr
     - ./run:/var/run/frr
     network_mode: host
-    entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel
+    cap_add:
+    - NET_ADMIN
+    - SYS_ADMIN
+    entrypoint: /usr/lib/frr/bgpd -f /etc/frr/bgpd.conf --log=stdout --log-level=debug --no_kernel --no_zebra
     restart: always
 
   zebra:
-    image: ajones17/frr:662
+    image: frrouting/frr:v8.2.2
     volumes:
     - ./conf:/etc/frr
     - ./run:/var/run/frr
     network_mode: host
     cap_add:
     - NET_ADMIN
     - SYS_ADMIN
     entrypoint: /usr/lib/frr/zebra -f /etc/frr/zebra.conf --log=stdout --log-level=debug
     restart: always
 
   vtysh:
-    image: ajones17/frr:662
+    image: frrouting/frr:v8.2.2
     volumes:
     - ./conf:/etc/frr
     - ./run:/var/run/frr
     network_mode: host
-    entrypoint: vtysh -c "show ip bgp"
+    entrypoint: vtysh
 
   chmod:
     image: alpine
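For context, a minimal way to exercise this Compose file (a sketch, assuming it sits next to the conf/ and run/ directories it mounts):

    # Start the routing daemons, then open an interactive vtysh session
    # (vtysh is the new entrypoint of the vtysh service):
    docker-compose up -d zebra bgpd
    docker-compose run vtysh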
@@ -9,21 +9,21 @@ services:
 
   etcd:
     network_mode: "service:pause"
-    image: k8s.gcr.io/etcd:3.4.3
+    image: k8s.gcr.io/etcd:3.4.9
     command: etcd
 
   kube-apiserver:
     network_mode: "service:pause"
-    image: k8s.gcr.io/hyperkube:v1.17.2
+    image: k8s.gcr.io/hyperkube:v1.18.8
     command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount --allow-privileged
 
   kube-controller-manager:
     network_mode: "service:pause"
-    image: k8s.gcr.io/hyperkube:v1.17.2
+    image: k8s.gcr.io/hyperkube:v1.18.8
     command: kube-controller-manager --master http://localhost:8080 --allocate-node-cidrs --cluster-cidr=10.CLUSTER.0.0/16
     "Edit the CLUSTER placeholder first. Then, remove this line.":
 
   kube-scheduler:
     network_mode: "service:pause"
-    image: k8s.gcr.io/hyperkube:v1.17.2
+    image: k8s.gcr.io/hyperkube:v1.18.8
     command: kube-scheduler --master http://localhost:8080
@@ -1,3 +1,6 @@
+# Note: hyperkube isn't available after Kubernetes 1.18.
+# So we'll have to update this for Kubernetes 1.19!
+
 version: "3"
 
 services:
@@ -9,20 +12,20 @@ services:
 
   etcd:
     network_mode: "service:pause"
-    image: k8s.gcr.io/etcd:3.4.3
+    image: k8s.gcr.io/etcd:3.4.9
     command: etcd
 
   kube-apiserver:
     network_mode: "service:pause"
-    image: k8s.gcr.io/hyperkube:v1.17.2
+    image: k8s.gcr.io/hyperkube:v1.18.8
     command: kube-apiserver --etcd-servers http://127.0.0.1:2379 --address 0.0.0.0 --disable-admission-plugins=ServiceAccount
 
   kube-controller-manager:
     network_mode: "service:pause"
-    image: k8s.gcr.io/hyperkube:v1.17.2
+    image: k8s.gcr.io/hyperkube:v1.18.8
     command: kube-controller-manager --master http://localhost:8080
 
   kube-scheduler:
     network_mode: "service:pause"
-    image: k8s.gcr.io/hyperkube:v1.17.2
+    image: k8s.gcr.io/hyperkube:v1.18.8
     command: kube-scheduler --master http://localhost:8080
dockercoins/Tiltfile (new file, 72 lines):
@@ -0,0 +1,72 @@
|
||||
# (1) Setting up a registry, and telling Tilt to use it.
|
||||
|
||||
# Tilt needs a registry to store images.
|
||||
|
||||
# The following manifest defines a Deployment to run a basic Docker registry,
|
||||
# and a NodePort Service to access it. Using a NodePort means that we don't
|
||||
# need to obtain a TLS certificate, because we will be accessing the registry
|
||||
# through localhost.
|
||||
k8s_yaml('../k8s/tilt-registry.yaml')
|
||||
|
||||
# Tell Tilt to use the registry that we just deployed instead of whatever
|
||||
# is defined in our Kubernetes resources. Tilt will patch image names to
|
||||
# use our registry.
|
||||
default_registry('localhost:30555')
|
||||
|
||||
# Create a port forward so that we can access the registry from our local
|
||||
# environment, too. Note that if you run Tilt directly from a Kubernetes node
|
||||
# (which is not typical, but might happen in some lab/training environments)
|
||||
# the following might cause an error because port 30555 is already taken.
|
||||
k8s_resource(workload='tilt-registry', port_forwards='30555:5000')
|
||||
|
||||
# (2) Telling Tilt how to build and run our app.
|
||||
|
||||
# The following two lines will use the kubectl-build plugin
|
||||
# to leverage buildkit and build the images in our Kubernetes
|
||||
# cluster. This is not enabled by default, because it requires
|
||||
# the plugin to be installed.
|
||||
# See https://github.com/vmware-tanzu/buildkit-cli-for-kubectl
|
||||
# for more information about this plugin.
|
||||
#load('ext://kubectl_build', 'kubectl_build')
|
||||
#docker_build = kubectl_build
|
||||
|
||||
# Our Kubernetes manifests use images 'dockercoins/...' so we tell Tilt
|
||||
# how each of these images should be built. The first argument is the name
|
||||
# of the image, the second argument is the directory containing the build
|
||||
# context (i.e. the Dockerfile to build the image).
|
||||
docker_build('dockercoins/hasher', 'hasher')
|
||||
docker_build('dockercoins/rng', 'rng')
|
||||
docker_build('dockercoins/webui', 'webui')
|
||||
docker_build('dockercoins/worker', 'worker')
|
||||
|
||||
# The following manifests defines five Deployments and four Services for
|
||||
# our application.
|
||||
k8s_yaml('../k8s/dockercoins.yaml')
|
||||
|
||||
# (3) Finishing touches.
|
||||
|
||||
# The following line lets Tilt run with the default kubeadm cluster-admin context.
|
||||
allow_k8s_contexts('kubernetes-admin@kubernetes')
|
||||
|
||||
# Note: the whole section below (to set up ngrok tunnels) is disabled,
|
||||
# because ngrok now requires to set up an account to serve HTML
|
||||
# content. So we can still use ngrok for e.g. webhooks and "raw" APIs,
|
||||
# but not to serve web pages like the Tilt UI.
|
||||
|
||||
# # This will run an ngrok tunnel to expose Tilt to the outside world.
|
||||
# # This is intended to be used when Tilt runs on a remote machine.
|
||||
# local_resource(name='ngrok:tunnel', serve_cmd='ngrok http 10350')
|
||||
|
||||
# # This will wait until the ngrok tunnel is up, and show its URL to the user.
|
||||
# # We send the output to /dev/tty so that it doesn't get intercepted by
|
||||
# # Tilt, and gets displayed to the user's terminal instead.
|
||||
# # Note: this assumes that the ngrok instance will be running on port 4040.
|
||||
# # If you have other ngrok instances running on the machine, this might not work.
|
||||
# local_resource(name='ngrok:showurl', cmd='''
|
||||
# while sleep 1; do
|
||||
# TUNNELS=$(curl -fsSL http://localhost:4040/api/tunnels | jq -r .tunnels[].public_url)
|
||||
# [ "$TUNNELS" ] && break
|
||||
# done
|
||||
# printf "\nYou should be able to connect to the Tilt UI with the following URL(s): %s\n" "$TUNNELS" >/dev/tty
|
||||
# '''
|
||||
# )
|
||||
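A sketch of how this Tiltfile is typically driven (assuming Tilt is installed and the kubeconfig points at the training cluster):

    # From the dockercoins directory (so that '../k8s/...' resolves),
    # start the Tilt control loop; it deploys the registry, builds the
    # four images, and re-applies the manifests on every file change.
    cd dockercoins
    tilt up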
@@ -1,6 +1,6 @@
 FROM node:4-slim
 RUN npm install express
-RUN npm install redis
+RUN npm install redis@3
 COPY files/ /files/
 COPY webui.js /
 CMD ["node", "webui.js"]
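This hunk (the webui Dockerfile, judging by its content) pins the Node Redis client to major version 3; version 4 of the redis package moved to a promise-based API, which would break the existing callback-style webui.js. A sketch of rebuilding the image from the repo root (the tag mirrors the name the Tiltfile above uses):

    docker build -t dockercoins/webui dockercoins/webui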
@@ -13,7 +13,7 @@
     color: royalblue;
   }
 </style>
-<script src="jquery.js"></script>
+<script src="jquery-1.11.3.min.js"></script>
 <script src="d3.min.js"></script>
 <script src="rickshaw.min.js"></script>
 <script>
dockercoins/webui/files/jquery.js (vendored): 1 change
@@ -1 +0,0 @@
-jquery-1.11.3.min.js
k8s/Tiltfile.helmchart (new file, 8 lines):
k8s_yaml(helm(
    "./path-to-chart", name="blue",
    values=[], # Example: ["./path/to/values.yaml"]
    set=[
        "image.repository=jpetazzo/color",
        "image.tag=latest",
    ]
))
k8s/admission-configuration.yaml (new file, 16 lines):
apiVersion: apiserver.config.k8s.io/v1
kind: AdmissionConfiguration
plugins:
- name: PodSecurity
  configuration:
    apiVersion: pod-security.admission.config.k8s.io/v1alpha1
    kind: PodSecurityConfiguration
    defaults:
      enforce: baseline
      audit: baseline
      warn: baseline
    exemptions:
      usernames:
      - cluster-admin
      namespaces:
      - kube-system
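This AdmissionConfiguration is not applied with kubectl; the API server reads it at startup. A sketch, assuming the file is mounted at a path visible to kube-apiserver:

    # Point the API server at the admission configuration file
    # (the path is an assumption; use wherever the file is mounted):
    kube-apiserver \
      --admission-control-config-file=/etc/kubernetes/admission-configuration.yaml
      # (all other kube-apiserver flags omitted for brevity)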
k8s/certbot.yaml (new file, 36 lines):
kind: Service
apiVersion: v1
metadata:
  name: certbot
spec:
  ports:
  - port: 80
    protocol: TCP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: certbot
spec:
  rules:
  - http:
      paths:
      - path: /.well-known/acme-challenge/
        pathType: Prefix
        backend:
          service:
            name: certbot
            port:
              number: 80
---
apiVersion: v1
kind: Endpoints
metadata:
  name: certbot
subsets:
- addresses:
  - ip: A.B.C.D
  ports:
  - port: 8000
    protocol: TCP
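A.B.C.D is a placeholder: the Endpoints object must point at the machine where certbot answers HTTP-01 challenges on port 8000. A hedged sketch of the matching certbot invocation, run on that machine (the domain is a placeholder too, mirroring the Certificate below):

    # The Ingress above routes /.well-known/acme-challenge/ here:
    certbot certonly --standalone --http-01-port 8000 -d xyz.A.B.C.D.nip.io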
k8s/cm-certificate.yaml (new file, 11 lines):
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: xyz.A.B.C.D.nip.io
spec:
  secretName: xyz.A.B.C.D.nip.io
  dnsNames:
  - xyz.A.B.C.D.nip.io
  issuerRef:
    name: letsencrypt-staging
    kind: ClusterIssuer
k8s/cm-clusterissuer.yaml (new file, 18 lines):
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    # Remember to update this if you use this manifest to obtain real certificates :)
    email: hello@example.com
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    # To use the production environment, use the following line instead:
    #server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: issuer-letsencrypt-staging
    solvers:
    - http01:
        ingress:
          class: traefik
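A minimal sketch of using the two cert-manager manifests above (assuming cert-manager itself is already installed in the cluster):

    kubectl apply -f k8s/cm-clusterissuer.yaml
    kubectl apply -f k8s/cm-certificate.yaml    # after replacing A.B.C.D
    # The Certificate's Ready condition flips to True once the staging
    # certificate has been issued into the named Secret:
    kubectl describe certificate xyz.A.B.C.D.nip.io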
@@ -1,3 +1,6 @@
+# Note: apiextensions.k8s.io/v1beta1 is deprecated, and won't be served
+# in Kubernetes 1.22 and later versions. This YAML manifest is here just
+# for reference, but it's not intended to be used in modern trainings.
 apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
@@ -4,6 +4,13 @@ metadata:
   name: coffees.container.training
 spec:
   group: container.training
+  versions:
+    - name: v1alpha1
+      served: true
+      storage: true
+      schema:
+        openAPIV3Schema:
+          type: object
   scope: Namespaced
   names:
     plural: coffees
@@ -11,25 +18,4 @@ spec:
     kind: Coffee
     shortNames:
     - cof
-  versions:
-    - name: v1alpha1
-      served: true
-      storage: true
-      schema:
-        openAPIV3Schema:
-          properties:
-            spec:
-              required:
-              - taste
-              properties:
-                taste:
-                  description: Subjective taste of that kind of coffee bean
-                  type: string
-  additionalPrinterColumns:
-  - jsonPath: .spec.taste
-    description: Subjective taste of that kind of coffee bean
-    name: Taste
-    type: string
-  - jsonPath: .metadata.creationTimestamp
-    name: Age
-    type: date
k8s/coffee-3.yaml (new file, 37 lines):
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: coffees.container.training
spec:
  group: container.training
  scope: Namespaced
  names:
    plural: coffees
    singular: coffee
    kind: Coffee
    shortNames:
    - cof
  versions:
    - name: v1alpha1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          required: [ spec ]
          properties:
            spec:
              type: object
              properties:
                taste:
                  description: Subjective taste of that kind of coffee bean
                  type: string
              required: [ taste ]
      additionalPrinterColumns:
      - jsonPath: .spec.taste
        description: Subjective taste of that kind of coffee bean
        name: Taste
        type: string
      - jsonPath: .metadata.creationTimestamp
        name: Age
        type: date
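A quick sketch of exercising this CRD; the printer columns declared above drive the extra columns in kubectl get output:

    kubectl apply -f k8s/coffee-3.yaml
    # After creating some Coffee resources (see the coffees hunk that
    # follows), list them; Taste and Age come from additionalPrinterColumns:
    kubectl get coffees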
@@ -9,9 +9,9 @@ spec:
 kind: Coffee
 apiVersion: container.training/v1alpha1
 metadata:
-  name: robusta
+  name: excelsa
 spec:
-  taste: stronger
+  taste: fruity
 ---
 kind: Coffee
 apiVersion: container.training/v1alpha1
@@ -23,7 +23,12 @@
 kind: Coffee
 apiVersion: container.training/v1alpha1
 metadata:
-  name: excelsa
+  name: robusta
 spec:
-  taste: fruity
-
+  taste: stronger
+  bitterness: high
 ---
 kind: Coffee
 apiVersion: container.training/v1alpha1
 metadata:
   name: java
k8s/consul-1.yaml (new file, 78 lines):
# Basic Consul cluster using Cloud Auto-Join.
# Caveats:
# - no actual persistence
# - scaling down to 1 will break the cluster
# - pods may be colocated
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: consul
rules:
- apiGroups: [""]
  resources:
  - pods
  verbs:
  - get
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: consul
subjects:
- kind: ServiceAccount
  name: consul
---
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  template:
    metadata:
      labels:
        app: consul
    spec:
      serviceAccountName: consul
      containers:
      - name: consul
        image: "consul:1.11"
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
@@ -1,5 +1,15 @@
 # Better Consul cluster.
+# There is still no actual persistence, but:
+# - podAntiaffinity prevents pod colocation
+# - clusters works when scaling down to 1 (thanks to lifecycle hook)
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: consul
 ---
 apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
+kind: Role
 metadata:
   name: consul
 rules:
@@ -11,22 +21,16 @@ rules:
   - list
 ---
 apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
+kind: RoleBinding
 metadata:
   name: consul
 roleRef:
   apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
+  kind: Role
   name: consul
 subjects:
 - kind: ServiceAccount
   name: consul
-  namespace: default
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: consul
 ---
 apiVersion: v1
 kind: Service
@@ -59,20 +63,22 @@ spec:
       podAntiAffinity:
         requiredDuringSchedulingIgnoredDuringExecution:
         - labelSelector:
-            matchExpressions:
-            - key: app
-              operator: In
-              values:
-              - consul
+            matchLabels:
+              app: consul
           topologyKey: kubernetes.io/hostname
       terminationGracePeriodSeconds: 10
       containers:
      - name: consul
-        image: "consul:1.6"
+        image: "consul:1.11"
+        env:
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
         args:
         - "agent"
         - "-bootstrap-expect=3"
-        - "-retry-join=provider=k8s label_selector=\"app=consul\""
+        - "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
         - "-client=0.0.0.0"
         - "-data-dir=/consul/data"
         - "-server"
@@ -80,7 +86,4 @@ spec:
       lifecycle:
         preStop:
           exec:
-            command:
-            - /bin/sh
-            - -c
-            - consul leave
+            command: [ "sh", "-c", "consul leave" ]
k8s/consul-3.yaml (new file, 99 lines):
# Even better Consul cluster.
# That one uses a volumeClaimTemplate to achieve true persistence.
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: consul
rules:
- apiGroups: [""]
  resources:
  - pods
  verbs:
  - get
  - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: consul
subjects:
- kind: ServiceAccount
  name: consul
---
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
  - port: 8500
    name: http
  selector:
    app: consul
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
spec:
  serviceName: consul
  replicas: 3
  selector:
    matchLabels:
      app: consul
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 1Gi
  template:
    metadata:
      labels:
        app: consul
    spec:
      serviceAccountName: consul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchLabels:
                app: consul
            topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      containers:
      - name: consul
        image: "consul:1.11"
        volumeMounts:
        - name: data
          mountPath: /consul/data
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        args:
        - "agent"
        - "-bootstrap-expect=3"
        - "-retry-join=provider=k8s label_selector=\"app=consul\" namespace=\"$(NAMESPACE)\""
        - "-client=0.0.0.0"
        - "-data-dir=/consul/data"
        - "-server"
        - "-ui"
        lifecycle:
          preStop:
            exec:
              command: [ "sh", "-c", "consul leave" ]
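A sketch of verifying the persistent variant (assuming a default StorageClass can satisfy the volumeClaimTemplate):

    kubectl apply -f k8s/consul-3.yaml
    kubectl rollout status statefulset consul
    # Each pod should report all three cluster members:
    kubectl exec consul-0 -- consul members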
k8s/dashboard-insecure.yaml (new file, 340 lines):
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: kubernetes-dashboard
spec: {}
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-metrics
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
- apiGroups:
  - ""
  resourceNames:
  - kubernetes-dashboard-key-holder
  - kubernetes-dashboard-certs
  - kubernetes-dashboard-csrf
  resources:
  - secrets
  verbs:
  - get
  - update
  - delete
- apiGroups:
  - ""
  resourceNames:
  - kubernetes-dashboard-settings
  resources:
  - configmaps
  verbs:
  - get
  - update
- apiGroups:
  - ""
  resourceNames:
  - heapster
  - dashboard-metrics-scraper
  resources:
  - services
  verbs:
  - proxy
- apiGroups:
  - ""
  resourceNames:
  - heapster
  - 'http:heapster:'
  - 'https:heapster:'
  - dashboard-metrics-scraper
  - http:dashboard-metrics-scraper
  resources:
  - services/proxy
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
  annotations: null
  labels:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
    kubernetes.io/cluster-service: "true"
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
  - name: http
    port: 443
    targetPort: http
  selector:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/name: kubernetes-dashboard
  type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations: null
  labels:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/component: kubernetes-dashboard
      app.kubernetes.io/instance: kubernetes-dashboard
      app.kubernetes.io/name: kubernetes-dashboard
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations: null
      labels:
        app.kubernetes.io/component: kubernetes-dashboard
        app.kubernetes.io/instance: kubernetes-dashboard
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: kubernetes-dashboard
        app.kubernetes.io/version: 2.5.0
        helm.sh/chart: kubernetes-dashboard-5.2.0
    spec:
      containers:
      - args:
        - --namespace=kubernetes-dashboard
        - --sidecar-host=http://127.0.0.1:8000
        - --enable-skip-login
        - --enable-insecure-login
        image: kubernetesui/dashboard:v2.5.0
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /
            port: 9090
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 30
        name: kubernetes-dashboard
        ports:
        - containerPort: 9090
          name: http
          protocol: TCP
        resources:
          limits:
            cpu: 2
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsGroup: 2001
          runAsUser: 1001
        volumeMounts:
        - mountPath: /certs
          name: kubernetes-dashboard-certs
        - mountPath: /tmp
          name: tmp-volume
      - image: kubernetesui/metrics-scraper:v1.0.7
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /
            port: 8000
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 30
        name: dashboard-metrics-scraper
        ports:
        - containerPort: 8000
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsGroup: 2001
          runAsUser: 1001
        volumeMounts:
        - mountPath: /tmp
          name: tmp-volume
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      serviceAccountName: kubernetes-dashboard
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - emptyDir: {}
        name: tmp-volume
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  creationTimestamp: null
  name: kubernetes-dashboard:insecure
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
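Since this variant binds the dashboard's ServiceAccount to cluster-admin and enables skip-login over plain HTTP, it belongs only in disposable training clusters. A sketch of reaching it:

    kubectl apply -f k8s/dashboard-insecure.yaml
    # Find the NodePort allocated to the service, then browse to
    # http://<any-node-address>:<nodeport>/
    kubectl -n kubernetes-dashboard get service kubernetes-dashboard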
k8s/dashboard-recommended.yaml (new file, 325 lines):
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: kubernetes-dashboard
spec: {}
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-metrics
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
- apiGroups:
  - ""
  resourceNames:
  - kubernetes-dashboard-key-holder
  - kubernetes-dashboard-certs
  - kubernetes-dashboard-csrf
  resources:
  - secrets
  verbs:
  - get
  - update
  - delete
- apiGroups:
  - ""
  resourceNames:
  - kubernetes-dashboard-settings
  resources:
  - configmaps
  verbs:
  - get
  - update
- apiGroups:
  - ""
  resourceNames:
  - heapster
  - dashboard-metrics-scraper
  resources:
  - services
  verbs:
  - proxy
- apiGroups:
  - ""
  resourceNames:
  - heapster
  - 'http:heapster:'
  - 'https:heapster:'
  - dashboard-metrics-scraper
  - http:dashboard-metrics-scraper
  resources:
  - services/proxy
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
  annotations: null
  labels:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
    kubernetes.io/cluster-service: "true"
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
  - name: https
    port: 443
    targetPort: https
  selector:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/name: kubernetes-dashboard
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations: null
  labels:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/component: kubernetes-dashboard
      app.kubernetes.io/instance: kubernetes-dashboard
      app.kubernetes.io/name: kubernetes-dashboard
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations: null
      labels:
        app.kubernetes.io/component: kubernetes-dashboard
        app.kubernetes.io/instance: kubernetes-dashboard
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: kubernetes-dashboard
        app.kubernetes.io/version: 2.5.0
        helm.sh/chart: kubernetes-dashboard-5.2.0
    spec:
      containers:
      - args:
        - --namespace=kubernetes-dashboard
        - --auto-generate-certificates
        - --sidecar-host=http://127.0.0.1:8000
        image: kubernetesui/dashboard:v2.5.0
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /
            port: 8443
            scheme: HTTPS
          initialDelaySeconds: 30
          timeoutSeconds: 30
        name: kubernetes-dashboard
        ports:
        - containerPort: 8443
          name: https
          protocol: TCP
        resources:
          limits:
            cpu: 2
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsGroup: 2001
          runAsUser: 1001
        volumeMounts:
        - mountPath: /certs
          name: kubernetes-dashboard-certs
        - mountPath: /tmp
          name: tmp-volume
      - image: kubernetesui/metrics-scraper:v1.0.7
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /
            port: 8000
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 30
        name: dashboard-metrics-scraper
        ports:
        - containerPort: 8000
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsGroup: 2001
          runAsUser: 1001
        volumeMounts:
        - mountPath: /tmp
          name: tmp-volume
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      serviceAccountName: kubernetes-dashboard
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - emptyDir: {}
        name: tmp-volume
k8s/dashboard-with-token.yaml (new file, 346 lines):
# This file was generated with the script ./update-dashboard-yaml.sh.
#
---
apiVersion: v1
kind: Namespace
metadata:
  creationTimestamp: null
  name: kubernetes-dashboard
spec: {}
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
data: null
kind: ConfigMap
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-metrics
rules:
- apiGroups:
  - metrics.k8s.io
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard-metrics
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard-metrics
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
- apiGroups:
  - ""
  resourceNames:
  - kubernetes-dashboard-key-holder
  - kubernetes-dashboard-certs
  - kubernetes-dashboard-csrf
  resources:
  - secrets
  verbs:
  - get
  - update
  - delete
- apiGroups:
  - ""
  resourceNames:
  - kubernetes-dashboard-settings
  resources:
  - configmaps
  verbs:
  - get
  - update
- apiGroups:
  - ""
  resourceNames:
  - heapster
  - dashboard-metrics-scraper
  resources:
  - services
  verbs:
  - proxy
- apiGroups:
  - ""
  resourceNames:
  - heapster
  - 'http:heapster:'
  - 'https:heapster:'
  - dashboard-metrics-scraper
  - http:dashboard-metrics-scraper
  resources:
  - services/proxy
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  annotations: null
  labels:
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
  annotations: null
  labels:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
    kubernetes.io/cluster-service: "true"
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
  - name: https
    port: 443
    targetPort: https
  selector:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/name: kubernetes-dashboard
  type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations: null
  labels:
    app.kubernetes.io/component: kubernetes-dashboard
    app.kubernetes.io/instance: kubernetes-dashboard
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: kubernetes-dashboard
    app.kubernetes.io/version: 2.5.0
    helm.sh/chart: kubernetes-dashboard-5.2.0
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/component: kubernetes-dashboard
      app.kubernetes.io/instance: kubernetes-dashboard
      app.kubernetes.io/name: kubernetes-dashboard
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations: null
      labels:
        app.kubernetes.io/component: kubernetes-dashboard
        app.kubernetes.io/instance: kubernetes-dashboard
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/name: kubernetes-dashboard
        app.kubernetes.io/version: 2.5.0
        helm.sh/chart: kubernetes-dashboard-5.2.0
    spec:
      containers:
      - args:
        - --namespace=kubernetes-dashboard
        - --auto-generate-certificates
        - --sidecar-host=http://127.0.0.1:8000
        image: kubernetesui/dashboard:v2.5.0
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /
            port: 8443
            scheme: HTTPS
          initialDelaySeconds: 30
          timeoutSeconds: 30
        name: kubernetes-dashboard
        ports:
        - containerPort: 8443
          name: https
          protocol: TCP
        resources:
          limits:
            cpu: 2
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsGroup: 2001
          runAsUser: 1001
        volumeMounts:
        - mountPath: /certs
          name: kubernetes-dashboard-certs
        - mountPath: /tmp
          name: tmp-volume
      - image: kubernetesui/metrics-scraper:v1.0.7
        imagePullPolicy: IfNotPresent
        livenessProbe:
          httpGet:
            path: /
            port: 8000
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 30
        name: dashboard-metrics-scraper
        ports:
        - containerPort: 8000
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          runAsGroup: 2001
          runAsUser: 1001
        volumeMounts:
        - mountPath: /tmp
          name: tmp-volume
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      serviceAccountName: kubernetes-dashboard
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - emptyDir: {}
        name: tmp-volume
(The remaining lines of this 346-line file were not captured in the mirror.)
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: kubernetes-dashboard:cluster-admin
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: cluster-admin
|
||||
namespace: kubernetes-dashboard
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
name: cluster-admin
|
||||
namespace: kubernetes-dashboard
|
||||
@@ -5,7 +5,7 @@ metadata:
   name: fluentd
   namespace: default
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: fluentd
@@ -21,7 +21,7 @@ rules:
   - watch
 ---
 kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: fluentd
 roleRef:

@@ -11,7 +11,7 @@ metadata:
   name: elasticsearch-operator
   namespace: elasticsearch-operator
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: elasticsearch-operator
@@ -41,7 +41,7 @@ rules:
   resources: ["elasticsearchclusters"]
   verbs: ["*"]
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   name: elasticsearch-operator
@@ -55,13 +55,16 @@ subjects:
   name: elasticsearch-operator
   namespace: elasticsearch-operator
 ---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
   name: elasticsearch-operator
   namespace: elasticsearch-operator
 spec:
   replicas: 1
+  selector:
+    matchLabels:
+      name: elasticsearch-operator
   template:
     metadata:
       labels:
k8s/event-node.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
kind: Event
apiVersion: v1
metadata:
  generateName: hello-
  labels:
    container.training/test: ""

#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42

involvedObject:
  kind: Node
  apiVersion: v1
  name: kind-control-plane
  # Note: the uid should be the Node name (not the uid of the Node).
  # This might be specific to global objects.
  uid: kind-control-plane

type: Warning
reason: NodeOverheat
message: "Node temperature exceeds critical threshold"
action: Hello
source:
  component: thermal-probe
#host: node1
#reportingComponent: ""
#reportingInstance: ""
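A quick usage sketch (assuming a kind cluster whose control plane node is named kind-control-plane, as in the manifest above; the "hello-" prefix is expanded by generateName):

    kubectl create -f k8s/event-node.yaml
    kubectl describe node kind-control-plane   # the NodeOverheat event shows up under Events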
k8s/event-pod.yaml (new file, 36 lines)
@@ -0,0 +1,36 @@
kind: Event
apiVersion: v1
metadata:
  # One convention is to use <objectname>.<timestamp>,
  # where the timestamp is taken with a nanosecond
  # precision and expressed in hexadecimal.
  # Example: web-5dcb957ccc-fjvzc.164689730a36ec3d
  name: hello.1234567890
  # The label doesn't serve any purpose, except making
  # it easier to identify or delete that specific event.
  labels:
    container.training/test: ""

#eventTime: "2020-07-04T00:00:00.000000Z"
#firstTimestamp: "2020-01-01T00:00:00.000000Z"
#lastTimestamp: "2020-12-31T00:00:00.000000Z"
#count: 42

involvedObject:
  ### These 5 lines should be updated to refer to an object.
  ### Make sure to put the correct "uid", because it is what
  ### "kubectl describe" is using to gather relevant events.
  #apiVersion: v1
  #kind: Pod
  #name: magic-bean
  #namespace: blue
  #uid: 7f28fda8-6ef4-4580-8d87-b55721fcfc30

type: Normal
reason: BackupSuccessful
message: "Object successfully dumped to gitops repository"
source:
  component: gitops-sync
#reportingComponent: ""
#reportingInstance: ""
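A minimal sketch of the naming convention described in the comments above (the pod name is hypothetical, taken from the comment's example; assumes GNU date, whose %s%N gives nanoseconds since the epoch):

    POD=web-5dcb957ccc-fjvzc                 # hypothetical pod name
    printf '%s.%x\n' "$POD" "$(date +%s%N)"  # e.g. web-5dcb957ccc-fjvzc.164689730a36ec3d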
@@ -52,7 +52,7 @@ data:
       - add_kubernetes_metadata:
           in_cluster: true
 ---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: DaemonSet
 metadata:
   name: filebeat
@@ -60,6 +60,9 @@ metadata:
   labels:
     k8s-app: filebeat
 spec:
+  selector:
+    matchLabels:
+      k8s-app: filebeat
   template:
     metadata:
       labels:
@@ -128,7 +131,7 @@ spec:
         path: /var/lib/filebeat-data
         type: DirectoryOrCreate
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   name: filebeat
@@ -141,7 +144,7 @@ roleRef:
   name: filebeat
   apiGroup: rbac.authorization.k8s.io
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: filebeat
@@ -1,4 +1,4 @@
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   name: kubernetes-dashboard
@@ -11,4 +11,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: kubernetes-dashboard
-  namespace: kube-system
+  namespace: kube-system
k8s/hackthecluster.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: hackthecluster
spec:
  selector:
    matchLabels:
      app: hackthecluster
  template:
    metadata:
      labels:
        app: hackthecluster
    spec:
      volumes:
      - name: slash
        hostPath:
          path: /
      tolerations:
      - effect: NoSchedule
        operator: Exists
      containers:
      - name: alpine
        image: alpine
        volumeMounts:
        - name: slash
          mountPath: /hostfs
        command:
        - sleep
        - infinity
        securityContext:
          #privileged: true
          capabilities:
            add:
            - SYS_CHROOT
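A usage sketch for this DaemonSet (recent kubectl versions accept ds/hackthecluster as the exec target; otherwise point it at one of the generated pods):

    kubectl apply -f k8s/hackthecluster.yaml
    kubectl exec -ti ds/hackthecluster -- chroot /hostfs sh   # full host filesystem access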
@@ -27,7 +27,7 @@ spec:
     command:
     - sh
     - -c
-    - "apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
+    - "mkdir -p /root/.ssh && apk update && apk add curl && curl https://github.com/jpetazzo.keys > /root/.ssh/authorized_keys"
   containers:
   - name: web
     image: nginx
@@ -1,18 +1,16 @@
 global
     daemon
     maxconn 256
 
 defaults
     mode tcp
-    timeout connect 5000ms
-    timeout client 50000ms
-    timeout server 50000ms
+    timeout connect 5s
+    timeout client 50s
+    timeout server 50s
 
-frontend the-frontend
+listen very-basic-load-balancer
     bind *:80
-    default_backend the-backend
-
-backend the-backend
-    server google.com-80 google.com:80 maxconn 32 check
-    server ibm.fr-80 ibm.fr:80 maxconn 32 check
+    server blue color.blue.svc:80
+    server green color.green.svc:80
+
+# Note: the services above must exist,
+# otherwise HAproxy won't start.
k8s/hpa-v2-pa-httplat.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v2beta2
metadata:
  name: rng
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: rng
  minReplicas: 1
  maxReplicas: 20
  behavior:
    scaleUp:
      stabilizationWindowSeconds: 60
    scaleDown:
      stabilizationWindowSeconds: 180
  metrics:
  - type: Object
    object:
      describedObject:
        apiVersion: v1
        kind: Service
        name: httplat
      metric:
        name: httplat_latency_seconds
      target:
        type: Value
        value: 0.1
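This HPA only works if something serves the custom metric httplat_latency_seconds for the httplat Service through the custom metrics API (e.g. a Prometheus adapter; that part is not shown here). A minimal check, under that assumption:

    kubectl apply -f k8s/hpa-v2-pa-httplat.yaml
    kubectl get hpa rng --watch   # replica count should follow the latency metric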
k8s/ingress-v1.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: whatever
spec:
  #tls:
  #- secretName: whatever.A.B.C.D.nip.io
  #  hosts:
  #  - whatever.A.B.C.D.nip.io
  rules:
  - host: whatever.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: whatever
            port:
              number: 1234
k8s/ingress-v1beta1.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: whatever
spec:
  #tls:
  #- secretName: whatever.A.B.C.D.nip.io
  #  hosts:
  #  - whatever.A.B.C.D.nip.io
  rules:
  - host: whatever.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: whatever
          servicePort: 1234
@@ -1,13 +0,0 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: whatever
spec:
  rules:
  - host: whatever.A.B.C.D.nip.io
    http:
      paths:
      - path: /
        backend:
          serviceName: whatever
          servicePort: 1234
k8s/ingress.yaml (new symbolic link)
@@ -0,0 +1 @@
ingress-v1beta1.yaml
@@ -1,360 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Namespace
metadata:
  name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kubernetes-dashboard
type: Opaque

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
  # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0-rc2
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
            - --enable-skip-login
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
            # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.2
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

---

apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: dashboard
  name: dashboard
spec:
  selector:
    matchLabels:
      app: dashboard
  template:
    metadata:
      labels:
        app: dashboard
    spec:
      containers:
      - args:
        - sh
        - -c
        - apk add --no-cache socat && socat TCP-LISTEN:80,fork,reuseaddr OPENSSL:kubernetes-dashboard.kubernetes-dashboard:443,verify=0
        image: alpine
        name: dashboard

---

apiVersion: v1
kind: Service
metadata:
  labels:
    app: dashboard
  name: dashboard
spec:
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: dashboard
  type: NodePort

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: insecure-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard
@@ -1,162 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# ------------------- Dashboard Secret ------------------- #

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-certs
  namespace: kube-system
type: Opaque

---
# ------------------- Dashboard Service Account ------------------- #

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Role & Role Binding ------------------- #

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
rules:
  # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["create"]
  # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create"]
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
  resources: ["secrets"]
  resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
  verbs: ["get", "update", "delete"]
  # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
  resources: ["configmaps"]
  resourceNames: ["kubernetes-dashboard-settings"]
  verbs: ["get", "update"]
  # Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
  resources: ["services"]
  resourceNames: ["heapster"]
  verbs: ["proxy"]
- apiGroups: [""]
  resources: ["services/proxy"]
  resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
  verbs: ["get"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system

---
# ------------------- Dashboard Deployment ------------------- #

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1
        ports:
        - containerPort: 8443
          protocol: TCP
        args:
          - --auto-generate-certificates
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        volumeMounts:
        - name: kubernetes-dashboard-certs
          mountPath: /certs
          # Create on-disk volume to store exec logs
        - mountPath: /tmp
          name: tmp-volume
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /
            port: 8443
          initialDelaySeconds: 30
          timeoutSeconds: 30
      volumes:
      - name: kubernetes-dashboard-certs
        secret:
          secretName: kubernetes-dashboard-certs
      - name: tmp-volume
        emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule

---
# ------------------- Dashboard Service ------------------- #

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
    - port: 443
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
k8s/kyverno-ingress-domain-name-1.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: ingress-domain-name
spec:
  rules:
  - name: create-ingress
    match:
      resources:
        kinds:
        - Service
    generate:
      kind: Ingress
      name: "{{request.object.metadata.name}}"
      namespace: "{{request.object.metadata.namespace}}"
      data:
        spec:
          rules:
          - host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
            http:
              paths:
              - backend:
                  service:
                    name: "{{request.object.metadata.name}}"
                    port:
                      number: 80
                path: /
                pathType: Prefix
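A sketch of the policy in action (the Service name "hello" is hypothetical; the generated Ingress host becomes hello.<namespace>.A.B.C.D.nip.io):

    kubectl create deployment hello --image=nginx
    kubectl expose deployment hello --port=80
    kubectl get ingress hello -o wide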
k8s/kyverno-ingress-domain-name-2a.yaml (new file, 32 lines)
@@ -0,0 +1,32 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: ingress-domain-name
spec:
  rules:
  - name: create-ingress
    match:
      resources:
        kinds:
        - Service
    preconditions:
    - key: "{{request.object.spec.ports[0].name}}"
      operator: Equals
      value: http
    generate:
      kind: Ingress
      name: "{{request.object.metadata.name}}"
      namespace: "{{request.object.metadata.namespace}}"
      data:
        spec:
          rules:
          - host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
            http:
              paths:
              - backend:
                  service:
                    name: "{{request.object.metadata.name}}"
                    port:
                      name: http
                path: /
                pathType: Prefix
k8s/kyverno-ingress-domain-name-2b.yaml (new file, 32 lines)
@@ -0,0 +1,32 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: ingress-domain-name
spec:
  rules:
  - name: create-ingress
    match:
      resources:
        kinds:
        - Service
    preconditions:
    - key: http
      operator: In
      value: "{{request.object.spec.ports[*].name}}"
    generate:
      kind: Ingress
      name: "{{request.object.metadata.name}}"
      namespace: "{{request.object.metadata.namespace}}"
      data:
        spec:
          rules:
          - host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
            http:
              paths:
              - backend:
                  service:
                    name: "{{request.object.metadata.name}}"
                    port:
                      name: http
                path: /
                pathType: Prefix
k8s/kyverno-ingress-domain-name-2c.yaml (new file, 34 lines)
@@ -0,0 +1,34 @@
# Note: this policy uses the operator "AnyIn", which was introduced in Kyverno 1.6.
# (This policy won't work with Kyverno 1.5!)
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: ingress-domain-name
spec:
  rules:
  - name: create-ingress
    match:
      resources:
        kinds:
        - Service
    preconditions:
    - key: "{{request.object.spec.ports[*].port}}"
      operator: AnyIn
      value: [ 80 ]
    generate:
      kind: Ingress
      name: "{{request.object.metadata.name}}"
      namespace: "{{request.object.metadata.namespace}}"
      data:
        spec:
          rules:
          - host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.A.B.C.D.nip.io"
            http:
              paths:
              - backend:
                  service:
                    name: "{{request.object.metadata.name}}"
                    port:
                      name: http
                path: /
                pathType: Prefix
k8s/kyverno-ingress-domain-name-3.yaml (new file, 37 lines)
@@ -0,0 +1,37 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: ingress-domain-name
spec:
  rules:
  - name: create-ingress
    context:
    - name: configmap
      configMap:
        name: ingress-domain-name
        namespace: "{{request.object.metadata.namespace}}"
    match:
      resources:
        kinds:
        - Service
    preconditions:
    - key: "{{request.object.spec.ports[0].name}}"
      operator: Equals
      value: http
    generate:
      kind: Ingress
      name: "{{request.object.metadata.name}}"
      namespace: "{{request.object.metadata.namespace}}"
      data:
        spec:
          rules:
          - host: "{{request.object.metadata.name}}.{{request.object.metadata.namespace}}.{{configmap.data.domain}}"
            http:
              paths:
              - backend:
                  service:
                    name: "{{request.object.metadata.name}}"
                    port:
                      name: http
                path: /
                pathType: Prefix
k8s/kyverno-namespace-setup.yaml (new file, 63 lines)
@@ -0,0 +1,63 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: setup-namespace
spec:
  rules:
  - name: setup-limitrange
    match:
      resources:
        kinds:
        - Namespace
    generate:
      kind: LimitRange
      name: default-limitrange
      namespace: "{{request.object.metadata.name}}"
      data:
        spec:
          limits:
          - type: Container
            min:
              cpu: 0.1
              memory: 0.1
            max:
              cpu: 2
              memory: 2Gi
            default:
              cpu: 0.25
              memory: 500Mi
            defaultRequest:
              cpu: 0.25
              memory: 250Mi
  - name: setup-resourcequota
    match:
      resources:
        kinds:
        - Namespace
    generate:
      kind: ResourceQuota
      name: default-resourcequota
      namespace: "{{request.object.metadata.name}}"
      data:
        spec:
          hard:
            requests.cpu: "10"
            requests.memory: 10Gi
            limits.cpu: "20"
            limits.memory: 20Gi
  - name: setup-networkpolicy
    match:
      resources:
        kinds:
        - Namespace
    generate:
      kind: NetworkPolicy
      name: default-networkpolicy
      namespace: "{{request.object.metadata.name}}"
      data:
        spec:
          podSelector: {}
          ingress:
          - from:
            - podSelector: {}
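Creating any namespace should now generate all three objects; a quick check (the namespace name is hypothetical):

    kubectl create namespace quotas-demo
    kubectl get limitrange,resourcequota,networkpolicy -n quotas-demo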
k8s/kyverno-pod-color-1.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: pod-color-policy-1
spec:
  validationFailureAction: enforce
  rules:
  - name: ensure-pod-color-is-valid
    match:
      resources:
        kinds:
        - Pod
        selector:
          matchExpressions:
          - key: color
            operator: Exists
          - key: color
            operator: NotIn
            values: [ red, green, blue ]
    validate:
      message: "If it exists, the label color must be red, green, or blue."
      deny: {}
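A sketch of both outcomes (pod names are hypothetical; the second command should be rejected by the admission webhook):

    kubectl run ok-pod --image=nginx --labels=color=blue
    kubectl run bad-pod --image=nginx --labels=color=purple   # denied by pod-color-policy-1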
k8s/kyverno-pod-color-2.yaml (new file, 31 lines)
@@ -0,0 +1,31 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: pod-color-policy-2
spec:
  validationFailureAction: enforce
  background: false
  rules:
  - name: prevent-color-change
    match:
      resources:
        kinds:
        - Pod
    preconditions:
    - key: "{{ request.operation }}"
      operator: Equals
      value: UPDATE
    - key: "{{ request.oldObject.metadata.labels.color }}"
      operator: NotEquals
      value: ""
    - key: "{{ request.object.metadata.labels.color }}"
      operator: NotEquals
      value: ""
    validate:
      message: "Once label color has been added, it cannot be changed."
      deny:
        conditions:
        - key: "{{ request.object.metadata.labels.color }}"
          operator: NotEquals
          value: "{{ request.oldObject.metadata.labels.color }}"
k8s/kyverno-pod-color-3.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: pod-color-policy-3
spec:
  validationFailureAction: enforce
  background: false
  rules:
  - name: prevent-color-change
    match:
      resources:
        kinds:
        - Pod
    preconditions:
    - key: "{{ request.operation }}"
      operator: Equals
      value: UPDATE
    - key: "{{ request.oldObject.metadata.labels.color }}"
      operator: NotEquals
      value: ""
    - key: "{{ request.object.metadata.labels.color }}"
      operator: Equals
      value: ""
    validate:
      message: "Once label color has been added, it cannot be removed."
      deny:
        conditions:
k8s/kyverno-tls-for-ingress.yaml (new file, 46 lines)
@@ -0,0 +1,46 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: tls-for-ingress
spec:
  rules:
  - name: create-role
    match:
      resources:
        kinds:
        - Certificate
    generate:
      kind: Role
      apiVersion: rbac.authorization.k8s.io/v1
      name: "{{request.object.metadata.name}}"
      namespace: "{{request.object.metadata.namespace}}"
      data:
        rules:
        - verbs:
          - get
          apiGroups:
          - ""
          resources:
          - secrets
          resourceNames:
          - "{{request.object.metadata.name}}"
  - name: create-rolebinding
    match:
      resources:
        kinds:
        - Certificate
    generate:
      kind: RoleBinding
      apiVersion: rbac.authorization.k8s.io/v1
      name: "{{request.object.metadata.name}}"
      namespace: "{{request.object.metadata.namespace}}"
      data:
        roleRef:
          apiGroup: rbac.authorization.k8s.io
          kind: Role
          name: "{{request.object.metadata.name}}"
        subjects:
        - kind: ServiceAccount
          name: default
          namespace: "{{request.object.metadata.namespace}}"
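A hedged trigger example, assuming cert-manager is installed and a ClusterIssuer named selfsigned exists (both names are hypothetical here):

    kubectl apply -f - <<'EOF'
    apiVersion: cert-manager.io/v1
    kind: Certificate
    metadata:
      name: hello-cert
    spec:
      secretName: hello-cert
      dnsNames: [ hello.example.com ]
      issuerRef: { name: selfsigned, kind: ClusterIssuer }
    EOF
    kubectl get role/hello-cert rolebinding/hello-cert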
@@ -1,49 +1,50 @@
 # This is a local copy of:
 # https://github.com/rancher/local-path-provisioner/blob/master/deploy/local-path-storage.yaml
 ---
 apiVersion: v1
 kind: Namespace
 metadata:
   name: local-path-storage
 
 ---
 apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: local-path-provisioner-service-account
   namespace: local-path-storage
 
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
   name: local-path-provisioner-role
   namespace: local-path-storage
 rules:
-- apiGroups: [""]
-  resources: ["nodes", "persistentvolumeclaims"]
-  verbs: ["get", "list", "watch"]
-- apiGroups: [""]
-  resources: ["endpoints", "persistentvolumes", "pods"]
-  verbs: ["*"]
-- apiGroups: [""]
-  resources: ["events"]
-  verbs: ["create", "patch"]
-- apiGroups: ["storage.k8s.io"]
-  resources: ["storageclasses"]
-  verbs: ["get", "list", "watch"]
+- apiGroups: [ "" ]
+  resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
+  verbs: [ "get", "list", "watch" ]
+- apiGroups: [ "" ]
+  resources: [ "endpoints", "persistentvolumes", "pods" ]
+  verbs: [ "*" ]
+- apiGroups: [ "" ]
+  resources: [ "events" ]
+  verbs: [ "create", "patch" ]
+- apiGroups: [ "storage.k8s.io" ]
+  resources: [ "storageclasses" ]
+  verbs: [ "get", "list", "watch" ]
 
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   name: local-path-provisioner-bind
   namespace: local-path-storage
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
   name: local-path-provisioner-role
 subjects:
-- kind: ServiceAccount
-  name: local-path-provisioner-service-account
-  namespace: local-path-storage
+- kind: ServiceAccount
+  name: local-path-provisioner-service-account
+  namespace: local-path-storage
 
 ---
 apiVersion: apps/v1
 kind: Deployment
@@ -62,27 +63,28 @@ spec:
     spec:
       serviceAccountName: local-path-provisioner-service-account
       containers:
-      - name: local-path-provisioner
-        image: rancher/local-path-provisioner:v0.0.8
-        imagePullPolicy: Always
-        command:
-        - local-path-provisioner
-        - --debug
-        - start
-        - --config
-        - /etc/config/config.json
-        volumeMounts:
-        - name: config-volume
-          mountPath: /etc/config/
-        env:
-        - name: POD_NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
+      - name: local-path-provisioner
+        image: rancher/local-path-provisioner:v0.0.19
+        imagePullPolicy: IfNotPresent
+        command:
+        - local-path-provisioner
+        - --debug
+        - start
+        - --config
+        - /etc/config/config.json
+        volumeMounts:
+        - name: config-volume
+          mountPath: /etc/config/
+        env:
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
       volumes:
       - name: config-volume
         configMap:
           name: local-path-config
 
@@ -91,6 +93,7 @@ metadata:
 provisioner: rancher.io/local-path
 volumeBindingMode: WaitForFirstConsumer
 reclaimPolicy: Delete
+
 ---
 kind: ConfigMap
 apiVersion: v1
@@ -99,12 +102,59 @@ metadata:
   namespace: local-path-storage
 data:
   config.json: |-
-    {
-      "nodePathMap":[
-        {
-          "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
-          "paths":["/opt/local-path-provisioner"]
-        }
-      ]
-    }
+    {
+      "nodePathMap":[
+        {
+          "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
+          "paths":["/opt/local-path-provisioner"]
+        }
+      ]
+    }
+  setup: |-
+    #!/bin/sh
+    while getopts "m:s:p:" opt
+    do
+      case $opt in
+        p)
+          absolutePath=$OPTARG
+          ;;
+        s)
+          sizeInBytes=$OPTARG
+          ;;
+        m)
+          volMode=$OPTARG
+          ;;
+      esac
+    done
+
+    mkdir -m 0777 -p ${absolutePath}
+  teardown: |-
+    #!/bin/sh
+    while getopts "m:s:p:" opt
+    do
+      case $opt in
+        p)
+          absolutePath=$OPTARG
+          ;;
+        s)
+          sizeInBytes=$OPTARG
+          ;;
+        m)
+          volMode=$OPTARG
+          ;;
+      esac
+    done
+
+    rm -rf ${absolutePath}
+  helperPod.yaml: |-
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: helper-pod
+    spec:
+      containers:
+      - name: helper-pod
+        image: busybox
@@ -1,32 +1,61 @@
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
+# This file is https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
+# But with the following arguments added to metrics-server:
+#        args:
+#        - --kubelet-insecure-tls
+#        - --metric-resolution=5s
+apiVersion: v1
+kind: ServiceAccount
 metadata:
-  name: system:aggregated-metrics-reader
   labels:
-    rbac.authorization.k8s.io/aggregate-to-view: "true"
-    rbac.authorization.k8s.io/aggregate-to-edit: "true"
-    rbac.authorization.k8s.io/aggregate-to-admin: "true"
-rules:
-- apiGroups: ["metrics.k8s.io"]
-  resources: ["pods"]
-  verbs: ["get", "list", "watch"]
----
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
-metadata:
-  name: metrics-server:system:auth-delegator
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: system:auth-delegator
-subjects:
-- kind: ServiceAccount
+    k8s-app: metrics-server
   name: metrics-server
   namespace: kube-system
 ---
-apiVersion: rbac.authorization.k8s.io/v1beta1
+apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
+  labels:
+    k8s-app: metrics-server
+    rbac.authorization.k8s.io/aggregate-to-admin: "true"
+    rbac.authorization.k8s.io/aggregate-to-edit: "true"
+    rbac.authorization.k8s.io/aggregate-to-view: "true"
+  name: system:aggregated-metrics-reader
+rules:
+- apiGroups:
+  - metrics.k8s.io
+  resources:
+  - pods
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: system:metrics-server
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - nodes
+  - nodes/stats
+  - namespaces
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server-auth-reader
+  namespace: kube-system
 roleRef:
@@ -38,95 +67,26 @@ subjects:
   name: metrics-server
   namespace: kube-system
 ---
-apiVersion: apiregistration.k8s.io/v1beta1
-kind: APIService
-metadata:
-  name: v1beta1.metrics.k8s.io
-spec:
-  service:
-    name: metrics-server
-    namespace: kube-system
-  group: metrics.k8s.io
-  version: v1beta1
-  insecureSkipTLSVerify: true
-  groupPriorityMinimum: 100
-  versionPriority: 100
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: metrics-server
-  namespace: kube-system
----
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: metrics-server
-  namespace: kube-system
-  labels:
-    k8s-app: metrics-server
-spec:
-  selector:
-    matchLabels:
-      k8s-app: metrics-server
-  template:
-    metadata:
-      name: metrics-server
-      labels:
-        k8s-app: metrics-server
-    spec:
-      serviceAccountName: metrics-server
-      volumes:
-      # mount in tmp so we can safely use from-scratch images and/or read-only containers
-      - name: tmp-dir
-        emptyDir: {}
-      containers:
-      - name: metrics-server
-        image: k8s.gcr.io/metrics-server-amd64:v0.3.3
-        imagePullPolicy: Always
-        volumeMounts:
-        - name: tmp-dir
-          mountPath: /tmp
-        args:
-        - --kubelet-preferred-address-types=InternalIP
-        - --kubelet-insecure-tls
-        - --metric-resolution=5s
-
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: metrics-server
-  namespace: kube-system
-  labels:
-    kubernetes.io/name: "Metrics-server"
-spec:
-  selector:
-    k8s-app: metrics-server
-  ports:
-  - port: 443
-    protocol: TCP
-    targetPort: 443
----
 apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
+kind: ClusterRoleBinding
 metadata:
-  name: system:metrics-server
-rules:
-- apiGroups:
-  - ""
-  resources:
-  - pods
-  - nodes
-  - nodes/stats
-  verbs:
-  - get
-  - list
-  - watch
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server:system:auth-delegator
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
   name: system:auth-delegator
 subjects:
 - kind: ServiceAccount
   name: metrics-server
   namespace: kube-system
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
   labels:
     k8s-app: metrics-server
   name: system:metrics-server
 roleRef:
   apiGroup: rbac.authorization.k8s.io
@@ -136,3 +96,98 @@ subjects:
 - kind: ServiceAccount
   name: metrics-server
   namespace: kube-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server
+  namespace: kube-system
+spec:
+  ports:
+  - name: https
+    port: 443
+    protocol: TCP
+    targetPort: https
+  selector:
+    k8s-app: metrics-server
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: metrics-server
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      k8s-app: metrics-server
+  strategy:
+    rollingUpdate:
+      maxUnavailable: 0
+  template:
+    metadata:
+      labels:
+        k8s-app: metrics-server
+    spec:
+      containers:
+      - args:
+        - --cert-dir=/tmp
+        - --secure-port=4443
+        - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
+        - --kubelet-use-node-status-port
+        - --kubelet-insecure-tls
+        - --metric-resolution=5s
+        image: k8s.gcr.io/metrics-server/metrics-server:v0.4.3
+        imagePullPolicy: IfNotPresent
+        livenessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /livez
+            port: https
+            scheme: HTTPS
+          periodSeconds: 10
+        name: metrics-server
+        ports:
+        - containerPort: 4443
+          name: https
+          protocol: TCP
+        readinessProbe:
+          failureThreshold: 3
+          httpGet:
+            path: /readyz
+            port: https
+            scheme: HTTPS
+          periodSeconds: 10
+        securityContext:
+          readOnlyRootFilesystem: true
+          runAsNonRoot: true
+          runAsUser: 1000
+        volumeMounts:
+        - mountPath: /tmp
+          name: tmp-dir
+      nodeSelector:
+        kubernetes.io/os: linux
+      priorityClassName: system-cluster-critical
+      serviceAccountName: metrics-server
+      volumes:
+      - emptyDir: {}
+        name: tmp-dir
+---
+apiVersion: apiregistration.k8s.io/v1
+kind: APIService
+metadata:
+  labels:
+    k8s-app: metrics-server
+  name: v1beta1.metrics.k8s.io
+spec:
+  group: metrics.k8s.io
+  groupPriorityMinimum: 100
+  insecureSkipTLSVerify: true
+  service:
+    name: metrics-server
+    namespace: kube-system
+  version: v1beta1
+  versionPriority: 100
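Once metrics-server is up, the resource metrics pipeline can be checked with:

    kubectl top nodes
    kubectl top pods --all-namespaces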
k8s/mounter.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
kind: Pod
apiVersion: v1
metadata:
  generateName: mounter-
  labels:
    container.training/mounter: ""
spec:
  volumes:
  - name: pvc
    persistentVolumeClaim:
      claimName: my-pvc-XYZ45
  containers:
  - name: mounter
    image: alpine
    stdin: true
    tty: true
    volumeMounts:
    - name: pvc
      mountPath: /pvc
    workingDir: /pvc
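Since the manifest uses generateName, it must go through kubectl create (kubectl apply requires a fixed name), and the placeholder claim name my-pvc-XYZ45 has to match an actual PVC:

    kubectl create -f k8s/mounter.yaml
    kubectl attach -ti mounter-xxxxx   # replace with the generated pod name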
@@ -3,8 +3,7 @@ apiVersion: networking.k8s.io/v1
 metadata:
   name: deny-from-other-namespaces
 spec:
-  podSelector:
-    matchLabels:
+  podSelector: {}
   ingress:
   - from:
     - podSelector: {}
@@ -14,7 +14,7 @@ spec:
   initContainers:
   - name: git
     image: alpine
-    command: [ "sh", "-c", "apk add --no-cache git && git clone https://github.com/octocat/Spoon-Knife /www" ]
+    command: [ "sh", "-c", "apk add git && sleep 5 && git clone https://github.com/octocat/Spoon-Knife /www" ]
     volumeMounts:
     - name: www
       mountPath: /www/
k8s/openebs-pod.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
apiVersion: v1
kind: Pod
metadata:
  name: openebs-local-hostpath-pod
spec:
  volumes:
  - name: storage
    persistentVolumeClaim:
      claimName: local-hostpath-pvc
  containers:
  - name: better
    image: alpine
    command:
    - sh
    - -c
    - |
      while true; do
        echo "$(date) [$(hostname)] Kubernetes is better with PVs." >> /mnt/storage/greet.txt
        sleep $(($RANDOM % 5 + 20))
      done
    volumeMounts:
    - mountPath: /mnt/storage
      name: storage
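This pod assumes a PVC named local-hostpath-pvc already exists; a minimal sketch of one, assuming the OpenEBS local hostpath StorageClass (openebs-hostpath) is installed:

    kubectl apply -f - <<'EOF'
    kind: PersistentVolumeClaim
    apiVersion: v1
    metadata:
      name: local-hostpath-pvc
    spec:
      storageClassName: openebs-hostpath   # assumed OpenEBS local hostpath class
      accessModes: [ ReadWriteOnce ]
      resources: { requests: { storage: 1G } }
    EOF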
(One file diff suppressed because it is too large.)
@@ -22,7 +22,10 @@ spec:
         command: ["sh", "-c", "if [ -d /vol/lost+found ]; then rmdir /vol/lost+found; fi"]
       containers:
       - name: postgres
-        image: postgres:11
+        image: postgres:12
+        env:
+        - name: POSTGRES_HOST_AUTH_METHOD
+          value: trust
         volumeMounts:
         - mountPath: /var/lib/postgresql/data
           name: postgres
@@ -1,12 +1,12 @@
 ---
-apiVersion: extensions/v1beta1
+apiVersion: policy/v1beta1
 kind: PodSecurityPolicy
 metadata:
   annotations:
     apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
     apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
-    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
-    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
+    seccomp.security.alpha.kubernetes.io/allowedProfileNames: runtime/default
+    seccomp.security.alpha.kubernetes.io/defaultProfileName: runtime/default
   name: restricted
 spec:
   allowPrivilegeEscalation: false
k8s/pv.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
kind: PersistentVolume
apiVersion: v1
metadata:
  generateName: my-pv-
  labels:
    container.training/pv: ""
spec:
  accessModes:
  - ReadWriteOnce
  - ReadWriteMany
  capacity:
    storage: 1G
  hostPath:
    path: /tmp/my-pv
  #storageClassName: my-sc
  #claimRef:
  #  kind: PersistentVolumeClaim
  #  apiVersion: v1
  #  namespace: default
  #  name: my-pvc-XYZ45
k8s/pvc.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  generateName: my-pvc-
  labels:
    container.training/pvc: ""
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1G
  #storageClassName: my-sc
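Both pv.yaml and pvc.yaml use generateName, so they must be created (not applied); binding can then be observed with:

    kubectl create -f k8s/pv.yaml -f k8s/pvc.yaml
    kubectl get pv,pvc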
k8s/rainbow.yaml (new file, 147 lines)

---
apiVersion: v1
kind: Namespace
metadata:
  name: blue
  labels:
    app: rainbow
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: rainbow
    color: blue
  name: color
  namespace: blue
spec:
  selector:
    matchLabels:
      app: rainbow
      color: blue
  template:
    metadata:
      labels:
        app: rainbow
        color: blue
    spec:
      containers:
      - image: jpetazzo/color
        name: color
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: rainbow
    color: blue
  name: color
  namespace: blue
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: rainbow
    color: blue
  type: ClusterIP
---
apiVersion: v1
kind: Namespace
metadata:
  name: green
  labels:
    app: rainbow
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: rainbow
    color: green
  name: color
  namespace: green
spec:
  selector:
    matchLabels:
      app: rainbow
      color: green
  template:
    metadata:
      labels:
        app: rainbow
        color: green
    spec:
      containers:
      - image: jpetazzo/color
        name: color
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: rainbow
    color: green
  name: color
  namespace: green
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: rainbow
    color: green
  type: ClusterIP
---
apiVersion: v1
kind: Namespace
metadata:
  name: red
  labels:
    app: rainbow
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: rainbow
    color: red
  name: color
  namespace: red
spec:
  selector:
    matchLabels:
      app: rainbow
      color: red
  template:
    metadata:
      labels:
        app: rainbow
        color: red
    spec:
      containers:
      - image: jpetazzo/color
        name: color
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: rainbow
    color: red
  name: color
  namespace: red
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: rainbow
    color: red
  type: ClusterIP

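The three copies above differ only by color and Namespace; every Namespace ends up with its own Service named `color`, which makes this a handy fixture for DNS and namespace-scoping demos. A quick check, assuming the cluster uses the default `cluster.local` DNS suffix:

```bash
kubectl apply -f k8s/rainbow.yaml

# The short name "color" resolves differently depending on the pod's
# namespace; fully qualified names pick a specific backend:
kubectl run tester -ti --rm --restart=Never --image=alpine -- \
  wget -qO- http://color.blue.svc.cluster.local
```
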
@@ -1,28 +1,17 @@
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
-  annotations:
-    deployment.kubernetes.io/revision: "2"
-  creationTimestamp: null
-  generation: 1
   labels:
     app: socat
   name: socat
   namespace: kube-system
-  selfLink: /apis/extensions/v1beta1/namespaces/kube-system/deployments/socat
 spec:
-  replicas: 1
   selector:
     matchLabels:
       app: socat
-  strategy:
-    rollingUpdate:
-      maxSurge: 1
-      maxUnavailable: 1
-    type: RollingUpdate
   template:
     metadata:
       creationTimestamp: null
       labels:
         app: socat
     spec:
@@ -34,34 +23,19 @@ spec:
         image: alpine
         imagePullPolicy: Always
         name: socat
-        resources: {}
-        terminationMessagePath: /dev/termination-log
-        terminationMessagePolicy: File
-      dnsPolicy: ClusterFirst
-      restartPolicy: Always
-      schedulerName: default-scheduler
-      securityContext: {}
-      terminationGracePeriodSeconds: 30
-status: {}
 ---
 apiVersion: v1
 kind: Service
 metadata:
-  creationTimestamp: null
   labels:
     app: socat
   name: socat
   namespace: kube-system
-  selfLink: /api/v1/namespaces/kube-system/services/socat
 spec:
-  externalTrafficPolicy: Cluster
   ports:
   - port: 80
     protocol: TCP
     targetPort: 80
   selector:
     app: socat
-  sessionAffinity: None
   type: NodePort
-status:
-  loadBalancer: {}

k8s/tilt-registry.yaml (new file, 42 lines)

---
apiVersion: v1
kind: Namespace
metadata:
  name: tilt-registry
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: tilt-registry
  name: tilt-registry
  namespace: tilt-registry
spec:
  selector:
    matchLabels:
      app: tilt-registry
  template:
    metadata:
      labels:
        app: tilt-registry
    spec:
      containers:
      - image: registry
        name: registry
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: tilt-registry
  name: tilt-registry
  namespace: tilt-registry
spec:
  ports:
  - port: 5000
    protocol: TCP
    targetPort: 5000
    nodePort: 30555
  selector:
    app: tilt-registry
  type: NodePort

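Since the Service pins NodePort 30555, the registry is reachable on that port on every node. A sketch of pushing an image into it, assuming you run this on a cluster node (Docker treats `localhost` registries as insecure by default, so no TLS setup is needed there):

```bash
docker pull busybox
docker tag busybox localhost:30555/busybox
docker push localhost:30555/busybox
# Pod specs can then reference the image as localhost:30555/busybox.
```
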
k8s/traefik-v1.yaml (new file, 87 lines)

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  selector:
    matchLabels:
      k8s-app: traefik-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
        name: traefik-ingress-lb
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      hostNetwork: true
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: traefik:1.7
        name: traefik-ingress-lb
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: admin
          containerPort: 8080
          hostPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system

k8s/traefik-v2.yaml (new file, 114 lines)

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  selector:
    matchLabels:
      k8s-app: traefik-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
        name: traefik-ingress-lb
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      hostNetwork: true
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: traefik:v2.5
        name: traefik-ingress-lb
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: https
          containerPort: 443
          hostPort: 443
        - name: admin
          containerPort: 8080
          hostPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --accesslog
        - --api
        - --api.insecure
        - --log.level=INFO
        - --metrics.prometheus
        - --providers.kubernetesingress
        - --entrypoints.http.Address=:80
        - --entrypoints.https.Address=:443
        - --entrypoints.https.http.tls.certResolver=default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - networking.k8s.io
    resources:
      - ingresses
      - ingressclasses
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: IngressClass
apiVersion: networking.k8s.io/v1
metadata:
  name: traefik
  annotations:
    ingressclass.kubernetes.io/is-default-class: "true"
spec:
  controller: traefik.io/ingress-controller

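Because the IngressClass above carries the `ingressclass.kubernetes.io/is-default-class` annotation, plain Ingress resources should be picked up by Traefik without naming a class. A quick smoke test; the hostname is just an example, and `NODE_ADDRESS` stands for any node's address:

```bash
kubectl apply -f k8s/traefik-v2.yaml

kubectl create deployment web --image=nginx
kubectl expose deployment web --port=80
kubectl create ingress web --rule="web.example.com/*=web:80"

# Traefik listens on hostPort 80 on every node:
curl -H "Host: web.example.com" http://NODE_ADDRESS
```
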
k8s/traefik.yaml (deleted file, 103 lines)

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: traefik-ingress-controller
  namespace: kube-system
  labels:
    k8s-app: traefik-ingress-lb
spec:
  selector:
    matchLabels:
      k8s-app: traefik-ingress-lb
  template:
    metadata:
      labels:
        k8s-app: traefik-ingress-lb
        name: traefik-ingress-lb
    spec:
      tolerations:
      - effect: NoSchedule
        operator: Exists
      hostNetwork: true
      serviceAccountName: traefik-ingress-controller
      terminationGracePeriodSeconds: 60
      containers:
      - image: traefik:1.7
        name: traefik-ingress-lb
        ports:
        - name: http
          containerPort: 80
          hostPort: 80
        - name: admin
          containerPort: 8080
          hostPort: 8080
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - --api
        - --kubernetes
        - --logLevel=INFO
---
kind: Service
apiVersion: v1
metadata:
  name: traefik-ingress-service
  namespace: kube-system
spec:
  selector:
    k8s-app: traefik-ingress-lb
  ports:
  - protocol: TCP
    port: 80
    name: web
  - protocol: TCP
    port: 8080
    name: admin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
rules:
  - apiGroups:
      - ""
    resources:
      - services
      - endpoints
      - secrets
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - extensions
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: traefik-ingress-controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
  name: traefik-ingress-controller
  namespace: kube-system

k8s/traefik.yaml (new symbolic link)

traefik-v2.yaml

k8s/update-dashboard-yaml.sh (new executable file, 73 lines)

#!/bin/sh

banner() {
  echo "# This file was generated with the script $0."
  echo "#"
}

create_namespace() {
  # 'helm template --namespace ... --create-namespace'
  # doesn't create the namespace, so we need to create it.
  # https://github.com/helm/helm/issues/9813
  echo ---
  kubectl create namespace kubernetes-dashboard \
    -o yaml --dry-run=client
  echo ---
}

add_namespace() {
  # 'helm template --namespace ...' doesn't add namespace information,
  # so we do it with this convenient filter instead.
  # https://github.com/helm/helm/issues/10737
  kubectl create -f- -o yaml --dry-run=client --namespace kubernetes-dashboard
}

(
  banner
  create_namespace
  helm template kubernetes-dashboard kubernetes-dashboard \
    --repo https://kubernetes.github.io/dashboard/ \
    --create-namespace --namespace kubernetes-dashboard \
    --set "extraArgs={--enable-skip-login,--enable-insecure-login}" \
    --set metricsScraper.enabled=true \
    --set protocolHttp=true \
    --set service.type=NodePort \
    | add_namespace
  echo ---
  kubectl create clusterrolebinding kubernetes-dashboard:insecure \
    --clusterrole=cluster-admin \
    --serviceaccount=kubernetes-dashboard:kubernetes-dashboard \
    -o yaml --dry-run=client \
    #
) > dashboard-insecure.yaml

(
  banner
  create_namespace
  helm template kubernetes-dashboard kubernetes-dashboard \
    --repo https://kubernetes.github.io/dashboard/ \
    --create-namespace --namespace kubernetes-dashboard \
    --set metricsScraper.enabled=true \
    | add_namespace
) > dashboard-recommended.yaml

(
  banner
  create_namespace
  helm template kubernetes-dashboard kubernetes-dashboard \
    --repo https://kubernetes.github.io/dashboard/ \
    --create-namespace --namespace kubernetes-dashboard \
    --set metricsScraper.enabled=true \
    --set service.type=NodePort \
    | add_namespace
  echo ---
  kubectl create clusterrolebinding kubernetes-dashboard:cluster-admin \
    --clusterrole=cluster-admin \
    --serviceaccount=kubernetes-dashboard:cluster-admin \
    -o yaml --dry-run=client \
    #
  echo ---
  kubectl create serviceaccount -n kubernetes-dashboard cluster-admin \
    -o yaml --dry-run=client \
    #
) > dashboard-with-token.yaml

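A usage note, under the assumption that the script is meant to run from the k8s/ directory with helm and kubectl on the PATH: it regenerates the three dashboard bundles next to itself, so a typical refresh looks like:

```bash
cd k8s
./update-dashboard-yaml.sh
ls -l dashboard-insecure.yaml dashboard-recommended.yaml dashboard-with-token.yaml
```
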
@@ -8,24 +8,24 @@ metadata:
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRole
 metadata:
-  name: users:jean.doe
+  name: user=jean.doe
 rules:
 - apiGroups: [ certificates.k8s.io ]
   resources: [ certificatesigningrequests ]
   verbs: [ create ]
 - apiGroups: [ certificates.k8s.io ]
-  resourceNames: [ users:jean.doe ]
+  resourceNames: [ user=jean.doe ]
   resources: [ certificatesigningrequests ]
   verbs: [ get, create, delete, watch ]
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: ClusterRoleBinding
 metadata:
-  name: users:jean.doe
+  name: user=jean.doe
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
-  name: users:jean.doe
+  name: user=jean.doe
 subjects:
 - kind: ServiceAccount
   name: jean.doe

@@ -3,8 +3,6 @@ apiVersion: v1
 kind: PersistentVolume
 metadata:
   name: consul-node2
-  annotations:
-    node: node2
 spec:
   capacity:
     storage: 10Gi
@@ -26,8 +24,6 @@ apiVersion: v1
 kind: PersistentVolume
 metadata:
   name: consul-node3
-  annotations:
-    node: node3
 spec:
   capacity:
     storage: 10Gi
@@ -49,8 +45,6 @@ apiVersion: v1
 kind: PersistentVolume
 metadata:
   name: consul-node4
-  annotations:
-    node: node4
 spec:
   capacity:
     storage: 10Gi

prepare-eks/10_create_cluster.sh (new executable file, 13 lines)

#!/bin/sh
# Create an EKS cluster.
# This is not idempotent (each time you run it, it creates a new cluster).

eksctl create cluster \
	--node-type=t3.large \
	--nodes-max=10 \
	--alb-ingress-access \
	--asg-access \
	--ssh-access \
	--with-oidc \
	#

prepare-eks/20_create_users.sh (new executable file, 32 lines)

#!/bin/sh
# For each user listed in "users.txt", create an IAM user.
# Also create AWS API access keys, and store them in "users.keys".
# This is idempotent (you can run it multiple times; it will only
# create the missing users). However, it will not remove users.
# Note that you can remove users from "users.keys" (or even wipe
# that file out entirely) and then this script will delete their
# keys and generate new keys for them (and add the new keys to
# "users.keys").

echo "Getting list of existing users..."
aws iam list-users --output json | jq -r .Users[].UserName > users.tmp

for U in $(cat users.txt); do
  if ! grep -qw $U users.tmp; then
    echo "Creating user $U..."
    aws iam create-user --user-name=$U \
      --tags=Key=container.training,Value=1
  fi
  if ! grep -qw $U users.keys; then
    echo "Listing keys for user $U..."
    KEYS=$(aws iam list-access-keys --user=$U | jq -r .AccessKeyMetadata[].AccessKeyId)
    for KEY in $KEYS; do
      echo "Deleting key $KEY for user $U..."
      aws iam delete-access-key --user=$U --access-key-id=$KEY
    done
    echo "Creating access key for user $U..."
    aws iam create-access-key --user=$U --output json \
      | jq -r '.AccessKey | [ .UserName, .AccessKeyId, .SecretAccessKey ] | @tsv' \
      >> users.keys
  fi
done

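For reference, the jq `@tsv` filter above makes users.keys a tab-separated file with one `<UserName> <AccessKeyId> <SecretAccessKey>` line per user. One plausible way to sanity-check a freshly minted key (the profile name is arbitrary):

```bash
# Read the first line of users.keys and configure a throwaway profile.
read USER KEY_ID SECRET < users.keys
aws configure set aws_access_key_id "$KEY_ID" --profile "$USER"
aws configure set aws_secret_access_key "$SECRET" --profile "$USER"
aws sts get-caller-identity --profile "$USER"
```
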
prepare-eks/30_create_or_update_policy.sh (new executable file, 51 lines)

#!/bin/sh
# Create an IAM policy to authorize users to do "aws eks update-kubeconfig".
# This is idempotent, which makes it possible to update the policy document
# below if you want the users to do other things as well.
# Note that each time you run this script, it will actually create a new
# version of the policy, set that version as the default version, and
# remove all non-default versions. (You can only have up to 5 versions
# of a given policy, so they need to be cleaned up.)
# After running this script, you will want to attach the policy to our
# users (check the other scripts in this directory).

POLICY_NAME=user.container.training
POLICY_DOC='{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Action": [
                "eks:DescribeCluster"
            ],
            "Resource": "arn:aws:eks:*",
            "Effect": "Allow"
        }
    ]
}'

ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)

aws iam create-policy-version \
  --policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
  --policy-document "$POLICY_DOC" \
  --set-as-default

# For reference, the command below creates a policy without versioning:
#aws iam create-policy \
#  --policy-name user.container.training \
#  --policy-document "$POLICY_DOC"

for VERSION in $(
  aws iam list-policy-versions \
    --policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
    --query 'Versions[?!IsDefaultVersion].VersionId' \
    --output text)
do
  aws iam delete-policy-version \
    --policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME \
    --version-id "$VERSION"
done

# For reference, the command below shows all users using the policy:
#aws iam list-entities-for-policy \
#  --policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME

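To confirm which version ended up as the default after a run, something like this should do (`get-policy` and its `DefaultVersionId` field are standard `aws iam` calls):

```bash
ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
aws iam get-policy \
  --policy-arn arn:aws:iam::$ACCOUNT:policy/user.container.training \
  --query Policy.DefaultVersionId --output text
```
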
prepare-eks/40_attach_policy.sh (new executable file, 14 lines)

#!/bin/sh
# Attach our user policy to all the users defined in "users.txt".
# This should be idempotent, because attaching the same policy
# to the same user multiple times doesn't do anything.

ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
POLICY_NAME=user.container.training

for U in $(cat users.txt); do
  echo "Attaching policy to user $U..."
  aws iam attach-user-policy \
    --user-name $U \
    --policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME
done

prepare-eks/50_aws_auth.sh (new executable file, 24 lines)

#!/bin/sh
# Update the aws-auth ConfigMap to map our IAM users to Kubernetes users.
# Each user defined in "users.txt" will be mapped to a Kubernetes user
# with the same name, and put in the "container.training" group, too.
# This is idempotent.
# WARNING: this will wipe out the mapUsers component of the aws-auth
# ConfigMap, removing all users that aren't in "users.txt".
# It won't touch mapRoles, so it shouldn't break the role mappings
# put in place by EKS.

ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)

rm -f users.map
for U in $(cat users.txt); do
  echo "\
- userarn: arn:aws:iam::$ACCOUNT:user/$U
  username: $U
  groups: [ container.training ]\
" >> users.map
done

kubectl create --namespace=kube-system configmap aws-auth \
  --dry-run=client --from-file=mapUsers=users.map -o yaml \
  | kubectl apply -f-

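After a run, the result can be spot-checked straight from the ConfigMap (this only reads back what the script just applied):

```bash
kubectl get configmap aws-auth --namespace kube-system \
  -o jsonpath='{.data.mapUsers}'
```
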
prepare-eks/60_setup_rbac_and_ns.sh (new executable file, 65 lines)

#!/bin/sh
# Create a shared Kubernetes Namespace ("container-training") as well as
# individual namespaces for every user in "users.txt", and set up a bunch
# of permissions.
# Specifically:
# - each user gets "view" permissions in the "default" Namespace
# - each user gets "edit" permissions in the "container-training" Namespace
# - each user gets permissions to list Nodes and Namespaces
# - each user gets "admin" permissions in their personal Namespace
# Note that since Kubernetes Namespaces can't have dots in their names,
# dots in user names are mapped to dashes.
# So user "ada.lovelace" will get namespace "ada-lovelace".
# This is kind of idempotent (but will raise a bunch of errors for objects
# that already exist).
# TODO: if this needs to evolve, replace all the "create" operations by
# "apply" operations. But this is good enough for now.

kubectl create rolebinding --namespace default container.training \
  --group=container.training --clusterrole=view

kubectl create clusterrole view-nodes \
  --verb=get,list,watch --resource=node
kubectl create clusterrolebinding view-nodes \
  --group=container.training --clusterrole=view-nodes

kubectl create clusterrole view-namespaces \
  --verb=get,list,watch --resource=namespace
kubectl create clusterrolebinding view-namespaces \
  --group=container.training --clusterrole=view-namespaces

kubectl create namespace container-training
kubectl create rolebinding --namespace container-training edit \
  --group=container.training --clusterrole=edit

# Note: API calls to EKS tend to be fairly slow. To optimize things a bit,
# instead of running "kubectl" N times, we generate a bunch of YAML and
# apply it. It will still generate a lot of API calls but it's much faster
# than calling "kubectl" N times. It might be possible to make this even
# faster by generating a "kind: List" (I don't know if this would issue
# a single API call or multiple ones; TBD!)
for U in $(cat users.txt); do
  NS=$(echo $U | tr . -)
  cat <<EOF
---
kind: Namespace
apiVersion: v1
metadata:
  name: $NS
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: admin
  namespace: $NS
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: admin
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: $U
EOF
done | kubectl create -f-

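A way to spot-check the resulting permissions is kubectl's impersonation flags; note that the group-based grants only show up when `--as-group` is supplied along with `--as`:

```bash
# Admin in the personal namespace (bound to the user directly):
kubectl auth can-i create deployments --as ada.lovelace -n ada-lovelace

# Node listing comes from the group binding:
kubectl auth can-i list nodes --as ada.lovelace --as-group container.training

# Only "view" in the default namespace, so this should say "no":
kubectl auth can-i create deployments --as ada.lovelace -n default
```
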
prepare-eks/70_oidc.sh (new executable file, 76 lines)

#!/bin/sh
# Create an IAM role to be used by a Kubernetes ServiceAccount.
# The role isn't given any permissions yet (this has to be done by
# another script in this series), but a properly configured Pod
# should still be able to execute "aws sts get-caller-identity"
# and confirm that it's using that role.
# This requires the cluster to have an attached OIDC provider.
# This should be the case if the cluster has been created with
# the scripts in this directory; otherwise, this can be done with
# the following command, which is idempotent:
# eksctl utils associate-iam-oidc-provider --cluster cluster-name-12341234 --approve
# The policy document used below will authorize all ServiceAccounts
# in the "container-training" Namespace to use that role.
# This script will also annotate the container-training:default
# ServiceAccount so that it can use that role.
# This script is not quite idempotent: if you want to use a new
# trust policy, some work will be required. (You can delete the role,
# but that requires detaching the associated policies. There might also
# be a way to update the trust policy directly; we didn't investigate
# this further at this point.)

if [ "$1" ]; then
  CLUSTER="$1"
else
  echo "Please indicate cluster to use. Available clusters:"
  aws eks list-clusters --output table
  exit 1
fi

ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
OIDC=$(aws eks describe-cluster --name $CLUSTER --query cluster.identity.oidc.issuer --output text | cut -d/ -f3-)
ROLE_NAME=s3-reader-container-training
TRUST_POLICY=$(envsubst <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Federated": "arn:aws:iam::${ACCOUNT}:oidc-provider/${OIDC}"
      },
      "Action": "sts:AssumeRoleWithWebIdentity",
      "Condition": {
        "StringLike": {
          "${OIDC}:sub": ["system:serviceaccount:container-training:*"]
        }
      }
    }
  ]
}
EOF
)

aws iam create-role \
  --role-name "$ROLE_NAME" \
  --assume-role-policy-document "$TRUST_POLICY"

kubectl annotate serviceaccounts \
  --namespace container-training default \
  "eks.amazonaws.com/role-arn=arn:aws:iam::$ACCOUNT:role/$ROLE_NAME" \
  --overwrite

exit

# Here are commands to delete the role:
for POLICY_ARN in $(aws iam list-attached-role-policies --role-name $ROLE_NAME --query 'AttachedPolicies[*].PolicyArn' --output text); do aws iam detach-role-policy --role-name $ROLE_NAME --policy-arn $POLICY_ARN; done
aws iam delete-role --role-name $ROLE_NAME

# Merging the policy with the existing policies:
{
  aws iam get-role --role-name s3-reader-container-training | jq -r .Role.AssumeRolePolicyDocument.Statement[]
  echo "$TRUST_POLICY" | jq -r .Statement[]
} | jq -s '{"Version": "2012-10-17", "Statement": .}' > /tmp/policy.json
aws iam update-assume-role-policy \
  --role-name $ROLE_NAME \
  --policy-document file:///tmp/policy.json

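A quick way to verify that the IRSA wiring works: run a pod on the annotated default ServiceAccount and ask AWS who it is. This assumes the amazon/aws-cli image, whose entrypoint is the `aws` binary, so the role ARN should show up in the output:

```bash
kubectl run awscli -ti --rm --restart=Never \
  --namespace container-training \
  --image=amazon/aws-cli -- sts get-caller-identity
```
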
prepare-eks/80_s3_bucket.sh (new executable file, 54 lines)

#!/bin/sh
# Create an S3 bucket with two objects in it:
# - public.txt (world-readable)
# - private.txt (private)
# Also create an IAM policy granting read-only access to the bucket
# (and therefore, to the private object).
# Finally, attach the policy to an IAM role (for instance, the role
# created by another script in this directory).
# This isn't idempotent, but it can be made idempotent by replacing the
# "aws iam create-policy" call with "aws iam create-policy-version" and
# a bit of extra elbow grease. (See other scripts in this directory for
# an example.)

ACCOUNT=$(aws sts get-caller-identity | jq -r .Account)
BUCKET=container.training
ROLE_NAME=s3-reader-container-training
POLICY_NAME=s3-reader-container-training
POLICY_DOC=$(envsubst <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": [
        "s3:ListBucket",
        "s3:GetObject*"
      ],
      "Resource": [
        "arn:aws:s3:::$BUCKET",
        "arn:aws:s3:::$BUCKET/*"
      ]
    }
  ]
}
EOF
)

aws iam create-policy \
  --policy-name $POLICY_NAME \
  --policy-document "$POLICY_DOC"

aws s3 mb s3://container.training

echo "this is a public object" \
  | aws s3 cp - s3://container.training/public.txt \
    --acl public-read

echo "this is a private object" \
  | aws s3 cp - s3://container.training/private.txt \
    --acl private

aws iam attach-role-policy \
  --role-name "$ROLE_NAME" \
  --policy-arn arn:aws:iam::$ACCOUNT:policy/$POLICY_NAME

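And to check that both ACLs behave as intended, the public object can be fetched anonymously while the private one cannot (`--no-sign-request` forces an unauthenticated request):

```bash
# Anonymous read succeeds on the public object...
aws s3 cp s3://container.training/public.txt - --no-sign-request

# ...and fails on the private one (expect a 403).
aws s3 cp s3://container.training/private.txt - --no-sign-request
```
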
prepare-eks/users.txt (new file, 50 lines)

ada.lovelace
adele.goldstine
amanda.jones
anita.borg
ann.kiessling
barbara.mcclintock
beatrice.worsley
bessie.blount
betty.holberton
beulah.henry
carleen.hutchins
caroline.herschel
dona.bailey
dorothy.hodgkin
ellen.ochoa
edith.clarke
elisha.collier
elizabeth.feinler
emily.davenport
erna.hoover
frances.spence
gertrude.blanch
grace.hopper
grete.hermann
giuliana.tesoro
harriet.tubman
hedy.lamarr
irma.wyman
jane.goodall
jean.bartik
joy.mangano
josephine.cochrane
katherine.blodgett
kathleen.antonelli
lynn.conway
margaret.hamilton
maria.beasley
marie.curie
marjorie.joyner
marlyn.meltzer
mary.kies
melitta.bentz
milly.koss
radia.perlman
rosalind.franklin
ruth.teitelbaum
sarah.mather
sophie.wilson
stephanie.kwolek
yvonne.brill

prepare-tf/README.md (new file, 162 lines)

⚠️ This is work in progress. The UX needs to be improved,
and the docs could be better.

This directory contains a Terraform configuration to deploy
a bunch of Kubernetes clusters on various cloud providers,
using their respective managed Kubernetes products.

## With shell wrapper

This is the recommended use. It makes it easy to start N clusters
on any provider. It will create a directory with a name like
`tag-YYYY-MM-DD-HH-MM-SS-SEED-PROVIDER`, copy the Terraform configuration
to that directory, then create the clusters using that configuration.

1. One-time setup: configure provider authentication for the provider(s) that you wish to use.

   - Digital Ocean:
     ```bash
     doctl auth init
     ```

   - Google Cloud Platform: you will need to create a project named `prepare-tf`
     and enable the relevant APIs for this project (sorry, if you're new to GCP,
     this sounds vague; but if you're familiar with it you know what to do; if you
     want to change the project name you can edit the Terraform configuration)

   - Linode:
     ```bash
     linode-cli configure
     ```

   - Oracle Cloud: FIXME
     (set up `oci` through the `oci-cli` Python package)

   - Scaleway: run `scw init`

2. Optional: set number of clusters, cluster size, and region.

   By default, 1 cluster will be configured, with 2 nodes, and auto-scaling up to 5 nodes.

   If you want, you can override these parameters with the following variables.

   ```bash
   export TF_VAR_how_many_clusters=5
   export TF_VAR_min_nodes_per_pool=2
   export TF_VAR_max_nodes_per_pool=4
   export TF_VAR_location=xxx
   ```

   The `location` variable is optional. Each provider should have a default value.
   The value of the `location` variable is provider-specific. Examples:

   | Provider      | Example value     | How to see possible values  |
   |---------------|-------------------|-----------------------------|
   | Digital Ocean | `ams3`            | `doctl compute region list` |
   | Google Cloud  | `europe-north1-a` | `gcloud compute zones list` |
   | Linode        | `eu-central`      | `linode-cli regions list`   |
   | Oracle Cloud  | `eu-stockholm-1`  | `oci iam region list`       |

   You can also specify multiple locations, and then they will be
   used in round-robin fashion.

   For example, with Google Cloud, since the default quotas are very
   low (my account is limited to 8 public IP addresses per zone, and
   my requests to increase that quota were denied), you can do the
   following:

   ```bash
   export TF_VAR_location=$(gcloud compute zones list --format=json | jq -r .[].name | grep ^europe)
   ```

   Then when you apply, clusters will be created across all available
   zones in Europe. (When I write this, there are 20+ zones in Europe,
   so even with my quota, I can create 40 clusters.)

3. Run!

   ```bash
   ./run.sh <providername>
   ```

   (If you don't specify a provider name, it will list available providers.)

4. Shutting down

   Go to the directory that was created by the previous step (`tag-YYYY-MM...`)
   and run `terraform destroy`.

   You can also run `./cleanup.sh`, which will destroy ALL clusters deployed by previous runs of the wrapper.

## Without shell wrapper

Expert mode.

Useful to run steps separately, and/or when working on the Terraform configurations.

1. Select the provider you wish to use.

   Go to the `source` directory and edit `main.tf`.

   Change the `source` attribute of the `module "clusters"` section.

   Check the content of the `modules` directory to see available choices.

2. Initialize the provider.

   ```bash
   terraform init
   ```

3. Configure provider authentication.

   See steps above, and add the following extra steps:

   - Digital Ocean:
     ```bash
     export DIGITALOCEAN_ACCESS_TOKEN=$(grep ^access-token ~/.config/doctl/config.yaml | cut -d: -f2 | tr -d " ")
     ```

   - Linode:
     ```bash
     export LINODE_TOKEN=$(grep ^token ~/.config/linode-cli | cut -d= -f2 | tr -d " ")
     ```

4. Decide how many clusters and how many nodes per cluster you want.

5. Provision clusters.

   ```bash
   terraform apply
   ```

6. Perform second stage provisioning.

   This will install an SSH server on the clusters.

   ```bash
   cd stage2
   terraform init
   terraform apply
   ```

7. Obtain cluster connection information.

   The following command shows connection information, one cluster per line, ready to copy-paste in a shared document or spreadsheet.

   ```bash
   terraform output -json | jq -r 'to_entries[].value.value'
   ```

8. Destroy clusters.

   ```bash
   cd ..
   terraform destroy
   ```

9. Clean up stage2.

   ```bash
   rm stage2/terraform.tfstate*
   ```

prepare-tf/cleanup.sh (new executable file, 9 lines)

#!/bin/sh
export LINODE_TOKEN=$(grep ^token ~/.config/linode-cli | cut -d= -f2 | tr -d " ")
export DIGITALOCEAN_ACCESS_TOKEN=$(grep ^access-token ~/.config/doctl/config.yaml | cut -d: -f2 | tr -d " ")
for T in tag-*; do
  (
    cd $T
    terraform apply -destroy -auto-approve && mv ../$T ../deleted$T
  )
done

prepare-tf/run.sh (new executable file, 49 lines)

#!/bin/sh
set -e

TIME=$(which time)

PROVIDER=$1
[ "$PROVIDER" ] || {
  echo "Please specify a provider as first argument, or 'ALL' for parallel mode."
  echo "Available providers:"
  ls -1 source/modules
  exit 1
}

[ "$TAG" ] || {
  TIMESTAMP=$(date +%Y-%m-%d-%H-%M-%S)
  RANDOMTAG=$(base64 /dev/urandom | tr A-Z a-z | tr -d /+ | head -c5)
  export TAG=tag-$TIMESTAMP-$RANDOMTAG
}

[ "$PROVIDER" = "ALL" ] && {
  for PROVIDER in $(ls -1 source/modules); do
    $TERMINAL -T $TAG-$PROVIDER -e sh -c "
      export TAG=$TAG-$PROVIDER
      $0 $PROVIDER
      cd $TAG-$PROVIDER
      bash
    " &
  done
  exit 0
}

[ -d "source/modules/$PROVIDER" ] || {
  echo "Provider '$PROVIDER' not found."
  echo "Available providers:"
  ls -1 source/modules
  exit 1
}

export LINODE_TOKEN=$(grep ^token ~/.config/linode-cli | cut -d= -f2 | tr -d " ")
export DIGITALOCEAN_ACCESS_TOKEN=$(grep ^access-token ~/.config/doctl/config.yaml | cut -d: -f2 | tr -d " ")

cp -a source $TAG
cd $TAG
cp -r modules/$PROVIDER modules/PROVIDER
$TIME -o time.1.init terraform init
$TIME -o time.2.stage1 terraform apply -auto-approve
cd stage2
$TIME -o ../time.3.init terraform init
$TIME -o ../time.4.stage2 terraform apply -auto-approve

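Putting run.sh together with the variables documented in the README, a typical invocation might look like this (the provider name and counts are just examples):

```bash
export TF_VAR_how_many_clusters=3
export TF_VAR_min_nodes_per_pool=2
export TF_VAR_max_nodes_per_pool=4
./run.sh linode
# Creates tag-YYYY-MM-DD-HH-MM-SS-xxxxx/ and applies both stages in it.
```
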
prepare-tf/source/locals.tf (new file, 19 lines)

resource "random_string" "_" {
  length  = 4
  number  = false
  special = false
  upper   = false
}

resource "time_static" "_" {}

locals {
  timestamp = formatdate("YYYY-MM-DD-hh-mm", time_static._.rfc3339)
  tag       = random_string._.result
  # Common tags to be assigned to all resources
  common_tags = [
    "created-by-terraform",
    format("created-at-%s", local.timestamp),
    format("created-for-%s", local.tag)
  ]
}

prepare-tf/source/main.tf (new file, 88 lines)

module "clusters" {
  source             = "./modules/PROVIDER"
  for_each           = local.clusters
  cluster_name       = each.value.cluster_name
  min_nodes_per_pool = var.min_nodes_per_pool
  max_nodes_per_pool = var.max_nodes_per_pool
  enable_arm_pool    = var.enable_arm_pool
  node_size          = var.node_size
  common_tags        = local.common_tags
  location           = each.value.location
}

locals {
  clusters = {
    for i in range(101, 101 + var.how_many_clusters) :
    i => {
      cluster_name     = format("%s-%03d", local.tag, i)
      kubeconfig_path  = format("./stage2/kubeconfig.%03d", i)
      externalips_path = format("./stage2/externalips.%03d", i)
      flags_path       = format("./stage2/flags.%03d", i)
      location         = local.locations[i % length(local.locations)]
    }
  }
}

resource "local_file" "stage2" {
  filename        = "./stage2/main.tf"
  file_permission = "0644"
  content = templatefile(
    "./stage2.tmpl",
    { clusters = local.clusters }
  )
}

resource "local_file" "flags" {
  for_each        = local.clusters
  filename        = each.value.flags_path
  file_permission = "0600"
  content         = <<-EOT
    has_metrics_server: ${module.clusters[each.key].has_metrics_server}
  EOT
}

resource "local_file" "kubeconfig" {
  for_each        = local.clusters
  filename        = each.value.kubeconfig_path
  file_permission = "0600"
  content         = module.clusters[each.key].kubeconfig
}

resource "local_file" "externalips" {
  for_each        = local.clusters
  filename        = each.value.externalips_path
  file_permission = "0600"
  content         = data.external.externalips[each.key].result.externalips
}

resource "null_resource" "wait_for_nodes" {
  for_each = local.clusters
  provisioner "local-exec" {
    environment = {
      KUBECONFIG = local_file.kubeconfig[each.key].filename
    }
    command = <<-EOT
      set -e
      kubectl get nodes --watch | grep --silent --line-buffered .
      kubectl wait node --for=condition=Ready --all --timeout=10m
    EOT
  }
}

data "external" "externalips" {
  for_each   = local.clusters
  depends_on = [null_resource.wait_for_nodes]
  program = [
    "sh",
    "-c",
    <<-EOT
      set -e
      cat >/dev/null
      export KUBECONFIG=${local_file.kubeconfig[each.key].filename}
      echo -n '{"externalips": "'
      kubectl get nodes \
        -o 'jsonpath={.items[*].status.addresses[?(@.type=="ExternalIP")].address}'
      echo -n '"}'
    EOT
  ]
}

prepare-tf/source/modules/digitalocean/main.tf (new file, 17 lines)

resource "digitalocean_kubernetes_cluster" "_" {
  name = var.cluster_name
  tags = var.common_tags
  # Region is mandatory, so let's provide a default value.
  region  = var.location != null ? var.location : "nyc1"
  version = var.k8s_version

  node_pool {
    name       = "x86"
    tags       = var.common_tags
    size       = local.node_type
    auto_scale = true
    min_nodes  = var.min_nodes_per_pool
    max_nodes  = max(var.min_nodes_per_pool, var.max_nodes_per_pool)
  }

}

prepare-tf/source/modules/digitalocean/outputs.tf (new file, 11 lines)

output "kubeconfig" {
  value = digitalocean_kubernetes_cluster._.kube_config.0.raw_config
}

output "cluster_id" {
  value = digitalocean_kubernetes_cluster._.id
}

output "has_metrics_server" {
  value = false
}

prepare-tf/source/modules/digitalocean/providers.tf (new file, 8 lines)

terraform {
  required_providers {
    digitalocean = {
      source  = "digitalocean/digitalocean"
      version = "2.12.1"
    }
  }
}

prepare-tf/source/modules/digitalocean/variables.tf (new file, 57 lines)

variable "cluster_name" {
  type    = string
  default = "deployed-with-terraform"
}

variable "common_tags" {
  type    = list(string)
  default = []
}

variable "node_size" {
  type    = string
  default = "M"
}

variable "min_nodes_per_pool" {
  type    = number
  default = 2
}

variable "max_nodes_per_pool" {
  type    = number
  default = 5
}

# FIXME
variable "enable_arm_pool" {
  type    = bool
  default = false
}

variable "node_types" {
  type = map(string)
  default = {
    "S" = "s-1vcpu-2gb"
    "M" = "s-2vcpu-4gb"
    "L" = "s-4vcpu-8gb"
  }
}

locals {
  node_type = var.node_types[var.node_size]
}

# To view supported regions, run:
# doctl compute region list
variable "location" {
  type    = string
  default = null
}

# To view supported versions, run:
# doctl kubernetes options versions -o json | jq -r .[].slug
variable "k8s_version" {
  type    = string
  default = "1.22.8-do.1"
}

prepare-tf/source/modules/googlecloud/main.tf (new file, 65 lines)

resource "google_container_cluster" "_" {
  name               = var.cluster_name
  project            = local.project
  location           = local.location
  min_master_version = var.k8s_version

  # To deploy private clusters, uncomment the section below,
  # and uncomment the block in network.tf.
  # Private clusters require extra resources (Cloud NAT,
  # router, network, subnet) and the quota for some of these
  # resources is fairly low on GCP; so if you want to deploy
  # a lot of private clusters (more than 10), you can use these
  # blocks as a base but you will probably have to refactor
  # things quite a bit (you will at least need to define a single
  # shared router and use it across all the clusters).
  /*
  network    = google_compute_network._.name
  subnetwork = google_compute_subnetwork._.name

  private_cluster_config {
    enable_private_nodes = true
    # This must be set to "false".
    # (Otherwise, access to the public endpoint is disabled.)
    enable_private_endpoint = false
    # This must be set to a /28.
    # I think it shouldn't collide with the pod network subnet.
    master_ipv4_cidr_block = "10.255.255.0/28"
  }
  # Private clusters require "VPC_NATIVE" networking mode
  # (as opposed to the legacy "ROUTES").
  networking_mode = "VPC_NATIVE"
  # ip_allocation_policy is required for VPC_NATIVE clusters.
  ip_allocation_policy {
    # This is the block that will be used for pods.
    cluster_ipv4_cidr_block = "10.0.0.0/12"
    # The services block is optional
    # (GKE will pick one automatically).
    #services_ipv4_cidr_block = ""
  }
  */

  node_pool {
    name = "x86"
    node_config {
      tags         = var.common_tags
      machine_type = local.node_type
    }
    initial_node_count = var.min_nodes_per_pool
    autoscaling {
      min_node_count = var.min_nodes_per_pool
      max_node_count = max(var.min_nodes_per_pool, var.max_nodes_per_pool)
    }
  }

  # This is not strictly necessary.
  # We'll see if we end up using it.
  # (If it is removed, make sure to also remove the corresponding
  # key+cert variables from outputs.tf!)
  master_auth {
    client_certificate_config {
      issue_client_certificate = true
    }
  }
}

prepare-tf/source/modules/googlecloud/network.tf (new file, 38 lines)

/*
resource "google_compute_network" "_" {
  name    = var.cluster_name
  project = local.project
  # The default is to create subnets automatically.
  # However, this creates one subnet per zone in all regions,
  # which causes a quick exhaustion of the subnet quota.
  auto_create_subnetworks = false
}

resource "google_compute_subnetwork" "_" {
  name          = var.cluster_name
  ip_cidr_range = "10.254.0.0/16"
  region        = local.region
  network       = google_compute_network._.id
  project       = local.project
}

resource "google_compute_router" "_" {
  name    = var.cluster_name
  region  = local.region
  network = google_compute_network._.name
  project = local.project
}

resource "google_compute_router_nat" "_" {
  name    = var.cluster_name
  router  = google_compute_router._.name
  region  = local.region
  project = local.project
  # Everyone in the network is allowed to NAT out.
  # (We would change this if we only wanted to allow specific subnets to NAT out.)
  source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES"
  # Pick NAT addresses automatically.
  # (We would change this if we wanted to use specific addresses to NAT out.)
  nat_ip_allocate_option = "AUTO_ONLY"
}
*/

prepare-tf/source/modules/googlecloud/outputs.tf (new file, 35 lines)

data "google_client_config" "_" {}

output "kubeconfig" {
  value = <<-EOT
    apiVersion: v1
    kind: Config
    current-context: ${google_container_cluster._.name}
    clusters:
    - name: ${google_container_cluster._.name}
      cluster:
        server: https://${google_container_cluster._.endpoint}
        certificate-authority-data: ${google_container_cluster._.master_auth[0].cluster_ca_certificate}
    contexts:
    - name: ${google_container_cluster._.name}
      context:
        cluster: ${google_container_cluster._.name}
        user: client-token
    users:
    - name: client-cert
      user:
        client-key-data: ${google_container_cluster._.master_auth[0].client_key}
        client-certificate-data: ${google_container_cluster._.master_auth[0].client_certificate}
    - name: client-token
      user:
        token: ${data.google_client_config._.access_token}
  EOT
}

output "cluster_id" {
  value = google_container_cluster._.id
}

output "has_metrics_server" {
  value = true
}

prepare-tf/source/modules/googlecloud/providers.tf (new file, 8 lines)

terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "4.5.0"
    }
  }
}

prepare-tf/source/modules/googlecloud/variables.tf (new file, 68 lines)

variable "cluster_name" {
  type    = string
  default = "deployed-with-terraform"
}

variable "common_tags" {
  type    = list(string)
  default = []
}

variable "node_size" {
  type    = string
  default = "M"
}

variable "min_nodes_per_pool" {
  type    = number
  default = 2
}

variable "max_nodes_per_pool" {
  type    = number
  default = 5
}

# FIXME
variable "enable_arm_pool" {
  type    = bool
  default = false
}

variable "node_types" {
  type = map(string)
  default = {
    "S" = "e2-small"
    "M" = "e2-medium"
    "L" = "e2-standard-2"
  }
}

locals {
  node_type = var.node_types[var.node_size]
}

# To view supported locations, run:
# gcloud compute zones list
variable "location" {
  type    = string
  default = null
}

# To view supported versions, run:
# gcloud container get-server-config --region=europe-north1 '--format=flattened(channels)'
# But it's also possible to just specify e.g. "1.20" and it figures it out.
variable "k8s_version" {
  type    = string
  default = "1.21"
}

locals {
  location = var.location != null ? var.location : "europe-north1-a"
  region   = replace(local.location, "/-[a-z]$/", "")
  # Unfortunately, the following line doesn't work
  # (that attribute just returns an empty string)
  # so we have to hard-code the project name.
  #project = data.google_client_config._.project
  project = "prepare-tf"
}

prepare-tf/source/modules/linode/main.tf (new file, 17 lines)

resource "linode_lke_cluster" "_" {
  label = var.cluster_name
  tags  = var.common_tags
  # "region" is mandatory, so let's provide a default value if none was given.
  region      = var.location != null ? var.location : "eu-central"
  k8s_version = var.k8s_version

  pool {
    type  = local.node_type
    count = var.min_nodes_per_pool
    autoscaler {
      min = var.min_nodes_per_pool
      max = max(var.min_nodes_per_pool, var.max_nodes_per_pool)
    }
  }

}

prepare-tf/source/modules/linode/outputs.tf (new file, 11 lines)

output "kubeconfig" {
  value = base64decode(linode_lke_cluster._.kubeconfig)
}

output "cluster_id" {
  value = linode_lke_cluster._.id
}

output "has_metrics_server" {
  value = false
}

prepare-tf/source/modules/linode/providers.tf (new file, 8 lines)

terraform {
  required_providers {
    linode = {
      source  = "linode/linode"
      version = "1.22.0"
    }
  }
}

prepare-tf/source/modules/linode/variables.tf (new file, 57 lines)

variable "cluster_name" {
  type    = string
  default = "deployed-with-terraform"
}

variable "common_tags" {
  type    = list(string)
  default = []
}

variable "node_size" {
  type    = string
  default = "M"
}

variable "min_nodes_per_pool" {
  type    = number
  default = 2
}

variable "max_nodes_per_pool" {
  type    = number
  default = 5
}

# FIXME
variable "enable_arm_pool" {
  type    = bool
  default = false
}

variable "node_types" {
  type = map(string)
  default = {
    "S" = "g6-standard-1"
    "M" = "g6-standard-2"
    "L" = "g6-standard-4"
  }
}

locals {
  node_type = var.node_types[var.node_size]
}

# To view supported regions, run:
# linode-cli regions list
variable "location" {
  type    = string
  default = null
}

# To view supported versions, run:
# linode-cli lke versions-list --json | jq -r .[].id
variable "k8s_version" {
  type    = string
  default = "1.22"
}

prepare-tf/source/modules/oraclecloud/main.tf (new file, 59 lines)

resource "oci_identity_compartment" "_" {
  name          = var.cluster_name
  description   = var.cluster_name
  enable_delete = true
}

locals {
  compartment_id = oci_identity_compartment._.id
}

data "oci_identity_availability_domains" "_" {
  compartment_id = local.compartment_id
}

data "oci_core_images" "_" {
  for_each                 = local.pools
  compartment_id           = local.compartment_id
  operating_system         = "Oracle Linux"
  operating_system_version = "7.9"
  shape                    = each.value.shape
}

resource "oci_containerengine_cluster" "_" {
  compartment_id     = local.compartment_id
  kubernetes_version = var.k8s_version
  name               = "tf-oke"
  vcn_id             = oci_core_vcn._.id
  options {
    service_lb_subnet_ids = [oci_core_subnet.loadbalancers.id]
  }
  endpoint_config {
    is_public_ip_enabled = true
    subnet_id            = oci_core_subnet.controlplane.id
  }
}

resource "oci_containerengine_node_pool" "_" {
  for_each           = local.pools
  cluster_id         = oci_containerengine_cluster._.id
  compartment_id     = local.compartment_id
  kubernetes_version = var.k8s_version
  name               = each.key
  node_shape         = each.value.shape
  node_shape_config {
    memory_in_gbs = local.node_type.memory_in_gbs
    ocpus         = local.node_type.ocpus
  }
  node_config_details {
    size = var.min_nodes_per_pool
    placement_configs {
      availability_domain = data.oci_identity_availability_domains._.availability_domains[0].name
      subnet_id           = oci_core_subnet.nodes.id
    }
  }
  node_source_details {
    image_id    = data.oci_core_images._[each.key].images[0].id
    source_type = "image"
  }
}

prepare-tf/source/modules/oraclecloud/network.tf (new file, 81 lines)

resource "oci_core_vcn" "_" {
  compartment_id = local.compartment_id
  cidr_block     = "10.0.0.0/16"
  display_name   = "tf-vcn"
}

#
# On OCI, you can have either "public" or "private" subnets.
# In both cases, instances get addresses in the VCN CIDR block;
# but instances in "public" subnets also get a public address.
#
# Then, to enable communication to the outside world, you need:
# - for public subnets, an "internet gateway"
#   (will allow inbound and outbound traffic)
# - for private subnets, a "NAT gateway"
#   (will only allow outbound traffic)
# - optionally, for private subnets, a "service gateway"
#   (to access other OCI services, e.g. object store)
#
# In this configuration, we use public subnets, and since we
# need outside access, we add an internet gateway.
#
# Note that the default routing table in a VCN is empty, so we
# add the internet gateway to the default routing table.
# Similarly, the default security group in a VCN blocks almost
# everything, so we add a blanket rule in that security group.
#

resource "oci_core_internet_gateway" "_" {
  compartment_id = local.compartment_id
  display_name   = "tf-igw"
  vcn_id         = oci_core_vcn._.id
}

resource "oci_core_default_route_table" "_" {
  manage_default_resource_id = oci_core_vcn._.default_route_table_id
  route_rules {
    destination       = "0.0.0.0/0"
    destination_type  = "CIDR_BLOCK"
    network_entity_id = oci_core_internet_gateway._.id
  }
}

resource "oci_core_default_security_list" "_" {
  manage_default_resource_id = oci_core_vcn._.default_security_list_id
  ingress_security_rules {
    protocol = "all"
    source   = "0.0.0.0/0"
  }
  egress_security_rules {
    protocol    = "all"
    destination = "0.0.0.0/0"
  }
}

resource "oci_core_subnet" "controlplane" {
  compartment_id    = local.compartment_id
  cidr_block        = "10.0.254.0/24"
  vcn_id            = oci_core_vcn._.id
  display_name      = "tf-controlplane"
  route_table_id    = oci_core_default_route_table._.id
  security_list_ids = [oci_core_default_security_list._.id]
}

resource "oci_core_subnet" "nodes" {
  compartment_id    = local.compartment_id
  cidr_block        = "10.0.0.0/20"
  vcn_id            = oci_core_vcn._.id
  display_name      = "tf-nodes"
  route_table_id    = oci_core_default_route_table._.id
  security_list_ids = [oci_core_default_security_list._.id]
}

resource "oci_core_subnet" "loadbalancers" {
  compartment_id    = local.compartment_id
  cidr_block        = "10.0.96.0/20"
  vcn_id            = oci_core_vcn._.id
  display_name      = "tf-loadbalancers"
  route_table_id    = oci_core_default_route_table._.id
  security_list_ids = [oci_core_default_security_list._.id]
}

(Some files were not shown because too many files have changed in this diff.)