Compare commits
573 Commits
[Commit listing (Author / SHA1 / Date) omitted — 573 commits.]
4 .dockerignore
@@ -1,4 +0,0 @@
# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Ignore build and test binaries.
bin/
testbin/
23 .github/dependabot.yml vendored Normal file
@@ -0,0 +1,23 @@
version: 2
updates:
  - package-ecosystem: gomod
    directory: /
    schedule:
      interval: daily
    rebase-strategy: disabled
    commit-message:
      prefix: "feat(deps)"
    groups:
      k8s:
        patterns:
          - "k8s.io*"
      etcd:
        patterns:
          - "go.etcd.io/etcd/*"
  - package-ecosystem: github-actions
    directory: /
    schedule:
      interval: daily
    rebase-strategy: disabled
    commit-message:
      prefix: "chore(ci)"
10 .github/release-template.md vendored Normal file
@@ -0,0 +1,10 @@
This edge release can be pulled from Docker Hub as follows:

```
docker pull clastix/kamaji:$TAG
```

> As from the v1.0.0 release, CLASTIX no longer provides stable release artefacts.
>
> Stable release artefacts are offered on a subscription basis by CLASTIX, the main Kamaji project contributor.
> Learn more from CLASTIX's [Support](https://clastix.io/support/) section.
41 .github/workflows/ci.yaml vendored
@@ -7,36 +7,45 @@ on:
    branches: [ "*" ]

jobs:
  test:
    name: integration
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v6
      - uses: actions/setup-go@v6
        with:
          go-version-file: go.mod
      - run: make test
  golangci:
    name: lint
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-go@v3
      - uses: actions/checkout@v6
      - uses: actions/setup-go@v6
        with:
          go-version: '1.19'
          check-latest: true
          go-version-file: go.mod
      - name: Run golangci-lint
        uses: golangci/golangci-lint-action@v3.2.0
        with:
          version: v1.49.0
          only-new-issues: false
          args: --timeout 5m --config .golangci.yml
        run: make golint
        # TODO(prometherion): enable back once golangci-lint is built from v1.24 rather than v1.23
        # uses: golangci/golangci-lint-action@v6.5.2
        # with:
        # version: v1.62.2
        # only-new-issues: false
        # args: --config .golangci.yml
  diff:
    name: diff
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v6
        with:
          fetch-depth: 0
      - uses: actions/setup-go@v3
      - uses: actions/setup-go@v6
        with:
          go-version: '1.19'
          check-latest: true
      - run: make yaml-installation-file
      - name: Checking if YAML installer file is not aligned
          go-version-file: go.mod
      - run: make manifests
      - name: Checking if generated manifests are not aligned
        run: if [[ $(git diff | wc -l) -gt 0 ]]; then echo ">>> Untracked generated files have not been committed" && git --no-pager diff && exit 1; fi
      - name: Checking if YAML installer generated untracked files
      - name: Checking if missing untracked files for generated manifests
        run: test -z "$(git ls-files --others --exclude-standard 2> /dev/null)"
      - name: Checking if source code is not formatted
        run: test -z "$(git diff 2> /dev/null)"
91 .github/workflows/docker-ci.yml vendored
@@ -1,91 +0,0 @@
name: docker-ci

on:
  push:
    tags:
      - "v*"

jobs:
  docker-ci:
    runs-on: ubuntu-22.04
    steps:

      - name: Checkout
        uses: actions/checkout@v2

      - name: Generate build-args
        id: build-args
        run: |
          # Declare vars for internal use
          VERSION=$(git describe --abbrev=0 --tags)
          GIT_HEAD_COMMIT=$(git rev-parse --short HEAD)
          GIT_TAG_COMMIT=$(git rev-parse --short $VERSION)
          GIT_MODIFIED_1=$(git diff $GIT_HEAD_COMMIT $GIT_TAG_COMMIT --quiet && echo "" || echo ".dev")
          GIT_MODIFIED_2=$(git diff --quiet && echo "" || echo ".dirty")
          # Export to GH_ENV
          echo "GIT_LAST_TAG=$VERSION" >> $GITHUB_ENV
          echo "GIT_HEAD_COMMIT=$GIT_HEAD_COMMIT" >> $GITHUB_ENV
          echo "GIT_TAG_COMMIT=$GIT_TAG_COMMIT" >> $GITHUB_ENV
          echo "GIT_MODIFIED=$(echo "$GIT_MODIFIED_1""$GIT_MODIFIED_2")" >> $GITHUB_ENV
          echo "GIT_REPO=$(git config --get remote.origin.url)" >> $GITHUB_ENV
          echo "BUILD_DATE=$(git log -1 --format="%at" | xargs -I{} date -d @{} +%Y-%m-%dT%H:%M:%S)" >> $GITHUB_ENV

      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v3
        with:
          images: |
            quay.io/${{ github.repository }}
            docker.io/${{ github.repository }}
          tags: |
            type=semver,pattern={{raw}}
          flavor: |
            latest=false

      - name: Set up QEMU
        id: qemu
        uses: docker/setup-qemu-action@v1
        with:
          platforms: arm64,arm

      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1
        with:
          install: true

      - name: Inspect builder
        run: |
          echo "Name: ${{ steps.buildx.outputs.name }}"
          echo "Endpoint: ${{ steps.buildx.outputs.endpoint }}"
          echo "Status: ${{ steps.buildx.outputs.status }}"
          echo "Flags: ${{ steps.buildx.outputs.flags }}"
          echo "Platforms: ${{ steps.buildx.outputs.platforms }}"

      - name: Login to quay.io Container Registry
        uses: docker/login-action@v1
        with:
          registry: quay.io
          username: ${{ secrets.QUAY_IO_USERNAME }}
          password: ${{ secrets.QUAY_IO_TOKEN }}

      - name: Login to docker.io Container Registry
        uses: docker/login-action@v1
        with:
          registry: docker.io
          username: ${{ secrets.DOCKER_IO_USERNAME }}
          password: ${{ secrets.DOCKER_IO_TOKEN }}

      - name: Build and push
        id: build-release
        uses: docker/build-push-action@v2
        with:
          file: Dockerfile
          context: .
          platforms: linux/amd64,linux/arm64,linux/arm
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          build-args:

      - name: Image digest
        run: echo ${{ steps.build-release.outputs.digest }}
30 .github/workflows/e2e.yaml vendored
@@ -4,42 +4,54 @@ on:
  push:
    branches: [ "*" ]
    paths:
      - '.github/workflows/e2e.yml'
      - '.github/workflows/e2e.yaml'
      - 'api/**'
      - 'charts/kamaji/**'
      - 'controllers/**'
      - 'e2e/*'
      - 'Dockerfile'
      - '.ko.yaml'
      - 'go.*'
      - 'main.go'
      - 'Makefile'
      - 'internal/**'
      - 'cmd/**'
  pull_request:
    branches: [ "*" ]
    paths:
      - '.github/workflows/e2e.yml'
      - '.github/workflows/e2e.yaml'
      - 'api/**'
      - 'charts/kamaji/**'
      - 'controllers/**'
      - 'e2e/*'
      - 'Dockerfile'
      - '.ko.yaml'
      - 'go.*'
      - 'main.go'
      - 'Makefile'
      - 'internal/**'
      - 'cmd/**'

jobs:
  kind:
    name: Kubernetes
    runs-on: ubuntu-22.04
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v6
        with:
          fetch-depth: 0
      - uses: actions/setup-go@v3
      - uses: actions/setup-go@v6
        with:
          go-version: '1.19'
          check-latest: true
          go-version-file: go.mod
      - name: reclaim disk space from runner
        run: |
          sudo rm -rf /usr/share/dotnet /usr/local/lib/android /opt/ghc /opt/hostedtoolcache/CodeQL
      - run: |
          sudo apt-get update
          sudo apt-get install -y golang-cfssl
          sudo swapoff -a
          sudo modprobe br_netfilter
      - name: install required Go tools
        run: make kind ko helm ginkgo
      - name: cleaning up go mod
        run: go clean -modcache
      - name: e2e testing
        run: make e2e
27 .github/workflows/helm.yaml vendored
@@ -2,8 +2,7 @@ name: Helm Chart

on:
  push:
    branches: [ "*" ]
    tags: [ "helm-v*" ]
    branches: [ "master" ]
  pull_request:
    branches: [ "*" ]

@@ -12,26 +11,36 @@ jobs:
    name: diff
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v6
        with:
          fetch-depth: 0
      - run: make -C charts/kamaji docs
      - name: Checking if Helm docs is not aligned
      - name: Checking if Kamaji Helm Chart docs is not aligned
        run: if [[ $(git diff | wc -l) -gt 0 ]]; then echo ">>> Untracked changes have not been committed" && git --no-pager diff && exit 1; fi
      - run: make -C charts/kamaji-crds docs
      - name: Checking if Kamaji CRDs Helm Chart docs is not aligned
        run: if [[ $(git diff | wc -l) -gt 0 ]]; then echo ">>> Untracked changes have not been committed" && git --no-pager diff && exit 1; fi
  lint:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - uses: azure/setup-helm@v1
      - uses: actions/checkout@v6
      - uses: azure/setup-helm@v4
        with:
          version: 3.3.4
      - name: Linting Chart
      - name: Building dependencies
        run: |-
          helm repo add clastix https://clastix.github.io/charts
          helm dependency build ./charts/kamaji
      - name: Linting Kamaji Helm Chart
        run: helm lint ./charts/kamaji
      - name: Linting Kamaji CRDS Helm Chart
        run: helm lint ./charts/kamaji-crds
  release:
    if: startsWith(github.ref, 'refs/tags/helm-v')
    if: github.event_name == 'push' && github.ref == 'refs/heads/master'
    needs: [ "lint", "diff" ]
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v6
      - name: Publish Helm chart
        uses: stefanprodan/helm-gh-pages@master
        with:
37 .github/workflows/ko-build.yml vendored Normal file
@@ -0,0 +1,37 @@
name: Container image build

on:
  push:
    tags:
      - edge-*
      - v*
    branches:
      - master
  workflow_dispatch:
    inputs:
      tag:
        description: "Tag to build"
        required: true
        type: string

jobs:
  ko:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v6
        with:
          fetch-depth: 0
      - uses: actions/setup-go@v6
        with:
          go-version-file: go.mod
      - name: "ko: install"
        run: make ko
      - name: "ko: login to quay.io container registry"
        run: ./bin/ko login quay.io -u ${{ secrets.QUAY_IO_USERNAME }} -p ${{ secrets.QUAY_IO_TOKEN }}
      - name: "ko: login to docker.io container registry"
        run: ./bin/ko login docker.io -u ${{ secrets.DOCKER_IO_USERNAME }} -p ${{ secrets.DOCKER_IO_TOKEN }}
      - name: "ko: build and push tag"
        run: make VERSION=${{ github.event.inputs.tag }} KO_LOCAL=false KO_PUSH=true build
        if: github.event_name == 'workflow_dispatch'
      - name: "ko: build and push latest"
        run: make VERSION=latest KO_LOCAL=false KO_PUSH=true build
23 .github/workflows/pr.yaml vendored Normal file
@@ -0,0 +1,23 @@
name: Check PR Title

on:
  pull_request:
    types: [opened, edited, reopened, synchronize]

jobs:
  semantic-pr-title:
    runs-on: ubuntu-22.04
    steps:
      - uses: amannn/action-semantic-pull-request@v6
        with:
          types: |
            feat
            fix
            chore
            docs
            style
            refactor
            perf
            test
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
75 .github/workflows/release.yml vendored Normal file
@@ -0,0 +1,75 @@
name: Weekly Edge Release

on:
  schedule:
    - cron: '0 7 * * 1' # Every Monday at 9 AM CET
  workflow_dispatch:

permissions:
  contents: write

jobs:
  release:
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v6
        with:
          fetch-depth: 0
      - name: generating date metadata
        id: date
        run: |
          CURRENT_DATE=$(date -u +'%Y-%m-%d')
          YY=$(date -u +'%y')
          M=$(date -u +'%_m' | sed 's/ //g')
          FIRST_OF_MONTH=$(date -u -d "$CURRENT_DATE" +%Y-%m-01)
          WEEK_NUM=$(( (($(date -u +%s) - $(date -u -d "$FIRST_OF_MONTH" +%s)) / 86400 + $(date -u -d "$FIRST_OF_MONTH" +%u) - 1) / 7 + 1 ))

          echo "yy=$YY" >> $GITHUB_OUTPUT
          echo "month=$M" >> $GITHUB_OUTPUT
          echo "week=$WEEK_NUM" >> $GITHUB_OUTPUT
          echo "date=$CURRENT_DATE" >> $GITHUB_OUTPUT
      - name: generating tag metadata
        id: tag
        run: |
          TAG="edge-${{ steps.date.outputs.yy }}.${{ steps.date.outputs.month }}.${{ steps.date.outputs.week }}"
          echo "tag=$TAG" >> $GITHUB_OUTPUT
      - name: generate release notes from template
        run: |
          export TAG="${{ steps.tag.outputs.tag }}"
          envsubst < .github/release-template.md > release-notes.md
      - name: generate release notes from template
        run: |
          export TAG="${{ steps.tag.outputs.tag }}"
          envsubst < .github/release-template.md > release-notes-header.md
      - name: generate GitHub release notes
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          gh release --repo "$GITHUB_REPOSITORY" \
            create "${{ steps.tag.outputs.tag }}" \
            --generate-notes \
            --draft \
            --title "temp" \
            --notes "temp" > /dev/null || true

          gh release view "${{ steps.tag.outputs.tag }}" \
            --json body --jq .body > auto-notes.md

          gh release delete "${{ steps.tag.outputs.tag }}" --yes || true
      - name: combine notes
        run: |
          cat release-notes-header.md auto-notes.md > release-notes.md
      - name: create GitHub release
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          gh release create "${{ steps.tag.outputs.tag }}" \
            --title "${{ steps.tag.outputs.tag }}" \
            --notes-file release-notes.md
      - name: trigger container build workflow
        env:
          GH_TOKEN: ${{ secrets.WORKFLOW_TOKEN }}
        run: |
          gh workflow run "Container image build" \
            --ref master \
            -f tag="${{ steps.tag.outputs.tag }}"
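For reference, the `WEEK_NUM` expression in the `generating date metadata` step above packs the run date into an `edge-YY.M.W` tag. A quick worked example, with a purely illustrative date:

```bash
# Illustrative only: the same arithmetic as the workflow step above, for a
# hypothetical run on Monday 2025-01-20 (UTC); the first of that month is 2025-01-01.
YY=25    # date -u +'%y'
M=1      # date -u +'%_m' with the padding space stripped

# Days since the 1st:  (epoch(now) - epoch(first)) / 86400 = 19
# Weekday of the 1st:  date -u +%u for 2025-01-01 (a Wednesday) = 3
# Week number:         (19 + 3 - 1) / 7 + 1 = 21 / 7 + 1 = 4  (integer division)
WEEK_NUM=4

echo "edge-${YY}.${M}.${WEEK_NUM}"   # prints: edge-25.1.4
```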
9 .gitignore vendored
@@ -24,10 +24,17 @@ bin
*~
.vscode

# Tilt files.
.tiltbuild

**/*.kubeconfig
**/*.crt
**/*.key
**/*.pem
**/*.csr
**/server-csr.json
.DS_Store

**/server-csr.json
!deploy/kine/mysql/server-csr.json
!deploy/kine/nats/server-csr.json
charts/kamaji/charts
118 .golangci.yml
@@ -1,48 +1,76 @@
linters-settings:
  gci:
    sections:
      - standard
      - default
      - prefix(github.com/clastix/kamaji)
  goheader:
    template: |-
      Copyright 2022 Clastix Labs
      SPDX-License-Identifier: Apache-2.0

version: "2"
linters:
  default: all
  disable:
    - wrapcheck
    - gomnd
    - scopelint
    - varnamelen
    - testpackage
    - tagliatelle
    - paralleltest
    - ireturn
    - goerr113
    - gochecknoglobals
    - exhaustivestruct
    - wsl
    - exhaustive
    - nosprintfhostport
    - nonamedreturns
    - interfacebloat
    - exhaustruct
    - lll
    - gosec
    - gomoddirectives
    - godox
    - gochecknoinits
    - funlen
    - dupl
    - cyclop
    # deprecated linters
    - deadcode
    - golint
    - interfacer
    - structcheck
    - varcheck
    - nosnakecase
    - ifshort
    - maligned
  enable-all: true
    - depguard
    - dupl
    - err113
    - exhaustive
    - exhaustruct
    - funlen
    - gochecknoglobals
    - gochecknoinits
    - gocognit
    - godox
    - gomoddirectives
    - gosec
    - interfacebloat
    - ireturn
    - lll
    - mnd
    - nestif
    - nonamedreturns
    - nosprintfhostport
    - paralleltest
    - perfsprint
    - tagliatelle
    - testpackage
    - varnamelen
    - wrapcheck
    - wsl
  settings:
    staticcheck:
      checks:
        - all
        - -QF1008
    goheader:
      template: |-
        Copyright 2022 Clastix Labs
        SPDX-License-Identifier: Apache-2.0
    revive:
      rules:
        - name: dot-imports
          arguments:
            - allowedPackages:
                - github.com/onsi/ginkgo/v2
                - github.com/onsi/gomega
  exclusions:
    generated: lax
    presets:
      - comments
      - common-false-positives
      - legacy
      - std-error-handling
    paths:
      - third_party$
      - builtin$
      - examples$
formatters:
  enable:
    - gci
    - gofmt
    - gofumpt
    - goimports
  settings:
    gci:
      sections:
        - standard
        - default
        - prefix(github.com/clastix/kamaji/)
  exclusions:
    generated: lax
    paths:
      - third_party$
      - builtin$
      - examples$
9 .ko.yaml Normal file
@@ -0,0 +1,9 @@
defaultPlatforms:
  - linux/arm64
  - linux/amd64
  - linux/arm
builds:
  - id: kamaji
    main: .
    ldflags:
      - '{{ if index .Env "LD_FLAGS" }}{{ .Env.LD_FLAGS }}{{ end }}'
45 ADOPTERS.md Normal file
@@ -0,0 +1,45 @@
# Adopters

This is a list of companies that have adopted Kamaji.
Feel free to open a Pull-Request to get yours listed.

### Adopter list (alphabetically)

| Type | Name | Since | Website | Use-Case |
|:-|:-|:-|:-|:-|
| Vendor | Aknostic | 2023 | [link](https://aknostic.com) | Aknostic is a cloud-native consultancy company using Kamaji to build a Kubernetes based PaaS. |
| Vendor | Aruba | 2025 | [link](https://www.arubacloud.com/) | Aruba Cloud is an Italian Cloud Service Provider using Kamaji to build and offer [Managed Kubernetes Service](https://my.arubacloud.com). |
| Vendor | CBWS | 2025 | [link](https://cbws.nl) | CBWS is an European Cloud Provider using Kamaji to build and offer their [Managed Kubernetes Service](https://cbws.nl/cloud/kubernetes/). |
| Vendor | Coredge | 2025 | [link](https://coredge.io/) | Coredge uses Kamaji in its K8saaS offering to save infrastructure costs in its Sovereign Cloud & AI Infrastructure Platform for end-user organisations. |
| Vendor | DCloud | 2024 | [link](https://dcloud.co.id) | DCloud is an Indonesian Cloud Provider using Kamaji to build and offer [Managed Kubernetes Service](https://dcloud.co.id/dkubes.html). |
| Vendor | Dinova | 2025 | [link](https://dinova.one/) | Dinova is an Italian cloud services provider that integrates Kamaji in its datacenters to offer fully managed Kubernetes clusters. |
| Vendor | Hikube | 2024 | [link](https://hikube.cloud/) | Hikube.cloud is a Swiss sovereign cloud platform with triple replication across three Swiss datacenters, offering enterprise-grade infrastructure with full data sovereignty. |
| End-user | KINX | 2024 | [link](https://kinx.net/?lang=en) | KINX is an Internet infrastructure service provider and will use kamaji for its new [Managed Kubernetes Service](https://kinx.net/service/cloud/kubernetes/intro/?lang=en). |
| End-user | Namecheap | 2025 | [link](https://www.namecheap.com/) | Namecheap is an ICANN-accredited domain registrar and web hosting company that provides a wide range of internet-related services and uses Kamaji for both internal and external services. |
| Vendor | Netalia | 2025 | [link](https://www.netalia.it) | Netalia uses Kamaji for the Italian cloud
| Vendor | Netsons | 2023 | [link](https://www.netsons.com) | Netsons is an Italian hosting and cloud provider and uses Kamaji in its [Managed Kubernetes](https://www.netsons.com/kubernetes) offering. |
| Vendor | NVIDIA | 2024 | [link](https://github.com/NVIDIA/doca-platform) | DOCA Platform Framework manages provisioning and service orchestration for NVIDIA Bluefield DPUs. |
| R&D | Orange | 2024 | [link](https://gitlab.com/Orange-OpenSource/kanod) | Orange is a French telecommunications company using Kamaji for experimental research purpose, with Kanod research solution. |
| Vendor | Platform9 | 2024 | [link](https://elasticmachinepool.com) | Platform9 uses Kamaji in its offering - Elastic Machine Pool, which is a tool for optimizing the cost of running kubernetes clusters in EKS. |
| Vendor | Qumulus | 2024 | [link](https://www.qumulus.io) | Qumulus is a cloud provider and plans to use Kamaji for it's hosted Kubernetes service |
| End-user | sevensphere | 2023 | [link](https://www.sevensphere.io) | Sevensphere provides consulting services for end-user companies / cloud providers and uses Kamaji for designing cloud/on-premises Kubernetes-as-a-Service platform. |
| End-user | Sicuro Tech Lab | 2024 | [link](https://sicurotechlab.it/) | Sicuro Tech Lab offers cloud infrastructure for Web Agencies and uses kamaji to provide managed k8s services. |
| Vendor | Sovereign Cloud Stack | 2024 | [link](https://sovereigncloudstack.org) | Sovereign Cloud Stack develops a standardized cloud platform and uses Kamaji in there Kubernetes-as-a-Service reference implementation |
| R&D | TIM | 2024 | [link](https://www.gruppotim.it) | TIM is an Italian telecommunications company using Kamaji for experimental research and development purposes. |
| End-user | Tinext Cloud | 2025 | [link](https://cloud.tinext.com) | Tinex Cloud is a Swiss cloud service provider using Kamaji to build their Managed Kubernetes Services. |
| Vendor | Ænix | 2023 | [link](https://aenix.io/) | Ænix provides consulting services for cloud providers and uses Kamaji for running Kubernetes-as-a-Service in free PaaS platform [Cozystack](https://cozystack.io). |
| End-user | Rackspace | 2024 | [link](https://spot.rackspace.com/) | Rackspace Spot uses Kamaji to manage our instances, offering fully-managed kubernetes infrastructure, auctioned in an open market. |
| R&D | IONOS Cloud | 2024 | [link](https://cloud.ionos.com/) | IONOS Cloud is a German Cloud Provider evaluating Kamaji for its [Managed Kubernetes platform](https://cloud.ionos.com/managed/kubernetes). |
| Vendor | OVHCloud | 2025 | [link](https://www.ovhcloud.com/) | OVHCloud is a European Cloud Provider that will use Kamaji for its Managed Kubernetes Service offer. |
| Vendor | WOBCOM GmbH | 2024 | [link](https://www.wobcom.de/) | WOBCOM provides an [**Open Digital Platform**](https://www.wobcom.de/geschaeftskunden/odp/) solution for Smart Cities, which is provided for customers in a Managed Kubernetes provided by Kamaji. |
| Vendor | Mistral AI | 2025 | [link](https://mistral.ai/products/mistral-compute) | Mistral provides a baremetal kubernetes service that uses Kamaji for control plane management. |

### Adopter Types

**End-user**: The organization runs Kamaji in production in some way.

**Integration**: The organization has a product that integrates with Kamaji, but does not contain Kamaji.

**Vendor**: The organization packages Kamaji in their product and sells it as part of their product.

**R&D**: Company that exploring innovative technologies and solutions for research and development purposes.
40 Dockerfile
@@ -1,40 +0,0 @@
# Build the manager binary
FROM golang:1.19 as builder

WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download

# Copy the go source
COPY main.go main.go
COPY cmd/ cmd/
COPY api/ api/
COPY controllers/ controllers/
COPY internal/ internal/
COPY indexers/ indexers/

# Build
ARG TARGETARCH
ARG GIT_HEAD_COMMIT
ARG GIT_TAG_COMMIT
ARG GIT_LAST_TAG
ARG GIT_MODIFIED
ARG GIT_REPO
ARG BUILD_DATE

RUN CGO_ENABLED=0 GOOS=linux GOARCH=$TARGETARCH go build \
    -ldflags "-X github.com/clastix/kamaji/internal.GitRepo=$GIT_REPO -X github.com/clastix/kamaji/internal.GitTag=$GIT_LAST_TAG -X github.com/clastix/kamaji/internal.GitCommit=$GIT_HEAD_COMMIT -X github.com/clastix/kamaji/internal.GitDirty=$GIT_MODIFIED -X github.com/clastix/kamaji/internal.BuildTime=$BUILD_DATE" \
    -a -o kamaji main.go

# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/kamaji .
USER 65532:65532

ENTRYPOINT ["/kamaji"]
363 Makefile
@@ -3,40 +3,24 @@
# To re-generate a bundle for another specific version without changing the standard setup, you can:
# - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
# - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
VERSION ?= 0.2.2
VERSION ?= $(or $(shell git describe --abbrev=0 --tags 2>/dev/null),$(GIT_HEAD_COMMIT))

# CHANNELS define the bundle channels used in the bundle.
# Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
# To re-generate a bundle for other specific channels without changing the standard setup, you can:
# - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable)
# - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable")
ifneq ($(origin CHANNELS), undefined)
BUNDLE_CHANNELS := --channels=$(CHANNELS)
endif
# ENVTEST_K8S_VERSION specifies the Kubernetes version to be used
# during testing with the envtest environment. This ensures that
# the tests run against the correct API and behavior for the
# specific Kubernetes release being targeted (v1.31.0 in this case).
ENVTEST_K8S_VERSION = 1.31.0

# DEFAULT_CHANNEL defines the default channel used in the bundle.
# Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable")
# To re-generate a bundle for any other default channel without changing the default setup, you can:
# - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable)
# - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable")
ifneq ($(origin DEFAULT_CHANNEL), undefined)
BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL)
endif
BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)

# IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images.
# This variable is used to construct full image tags for bundle and catalog images.
#
# For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both
# clastix.io/operator-bundle:$VERSION and clastix.io/operator-catalog:$VERSION.
IMAGE_TAG_BASE ?= clastix.io/operator

# BUNDLE_IMG defines the image:tag used for the bundle.
# You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=<some-registry>/<project-name-bundle>:<tag>)
BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION)
# ENVTEST_VERSION defines the version of the setup-envtest binary
# used to manage and download the Kubernetes binaries (like etcd,
# kube-apiserver, and kubectl) required for testing. This version
# ensures compatibility with the selected Kubernetes version and
# must align closely with recent releases (release-0.19 is chosen here).
# Mismatches between these versions could result in compatibility issues.
ENVTEST_VERSION ?= release-0.19

# Image URL to use all building/pushing image targets
IMG ?= clastix/kamaji:v$(VERSION)
CONTAINER_REPOSITORY ?= docker.io/clastix/kamaji

# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
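The new `VERSION` default above falls back from the latest reachable tag to the short commit hash. Roughly, and with made-up values:

```bash
# Sketch of how VERSION resolves, with hypothetical repository states.

# Repository has at least one tag:
git describe --abbrev=0 --tags     # e.g. prints "v1.0.0"  ->  VERSION=v1.0.0

# No tags: git describe fails (stderr discarded), so $(or ...) falls back to
# GIT_HEAD_COMMIT, i.e. the short hash of HEAD:
git rev-parse --short HEAD         # e.g. prints "abc1234"  ->  VERSION=abc1234
```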
@@ -50,6 +34,22 @@ endif
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec

## Location to install dependencies to
LOCALBIN ?= $(shell pwd)/bin
$(LOCALBIN):
	mkdir -p $(LOCALBIN)

## Tool Binaries
APIDOCS_GEN ?= $(LOCALBIN)/crdoc
CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
GINKGO ?= $(LOCALBIN)/ginkgo
GOLANGCI_LINT ?= $(LOCALBIN)/golangci-lint
HELM ?= $(LOCALBIN)/helm
KIND ?= $(LOCALBIN)/kind
KO ?= $(LOCALBIN)/ko
YQ ?= $(LOCALBIN)/yq
ENVTEST ?= $(LOCALBIN)/setup-envtest

all: build

##@ General
@@ -70,39 +70,79 @@ help: ## Display this help.

##@ Binary

.PHONY: ko
ko: $(KO) ## Download ko locally if necessary.
$(KO): $(LOCALBIN)
	test -s $(LOCALBIN)/ko || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" github.com/google/ko@v0.18.1

.PHONY: yq
yq: $(YQ) ## Download yq locally if necessary.
$(YQ): $(LOCALBIN)
	test -s $(LOCALBIN)/yq || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" github.com/mikefarah/yq/v4@v4.44.2

.PHONY: helm
HELM = $(shell pwd)/bin/helm
helm: ## Download helm locally if necessary.
	$(call go-install-tool,$(HELM),helm.sh/helm/v3/cmd/helm@v3.9.0)
helm: $(HELM) ## Download helm locally if necessary.
$(HELM): $(LOCALBIN)
	test -s $(LOCALBIN)/helm || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" helm.sh/helm/v3/cmd/helm@v3.9.0

GINKGO = $(shell pwd)/bin/ginkgo
ginkgo: ## Download ginkgo locally if necessary.
	$(call go-install-tool,$(GINKGO),github.com/onsi/ginkgo/v2/ginkgo@v2.6.0)
.PHONY: ginkgo
ginkgo: $(GINKGO) ## Download ginkgo locally if necessary.
$(GINKGO): $(LOCALBIN)
	test -s $(LOCALBIN)/ginkgo || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" github.com/onsi/ginkgo/v2/ginkgo

KIND = $(shell pwd)/bin/kind
kind: ## Download kind locally if necessary.
	$(call go-install-tool,$(KIND),sigs.k8s.io/kind/cmd/kind@v0.14.0)
.PHONY: kind
kind: $(KIND) ## Download kind locally if necessary.
$(KIND): $(LOCALBIN)
	test -s $(LOCALBIN)/kind || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" sigs.k8s.io/kind/cmd/kind@v0.14.0

CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
controller-gen: ## Download controller-gen locally if necessary.
	$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.9.2)
.PHONY: controller-gen
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
$(CONTROLLER_GEN): $(LOCALBIN)
	test -s $(LOCALBIN)/controller-gen || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" sigs.k8s.io/controller-tools/cmd/controller-gen@v0.20.0

GOLANGCI_LINT = $(shell pwd)/bin/golangci-lint
golangci-lint: ## Download golangci-lint locally if necessary.
	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@v1.49.0)
.PHONY: golangci-lint
golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
$(GOLANGCI_LINT): $(LOCALBIN)
	test -s $(LOCALBIN)/golangci-lint || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.0.2

KUSTOMIZE = $(shell pwd)/bin/kustomize
kustomize: ## Download kustomize locally if necessary.
	$(call install-kustomize,$(KUSTOMIZE),3.8.7)
.PHONY: apidocs-gen
apidocs-gen: $(APIDOCS_GEN) ## Download crdoc locally if necessary.
$(APIDOCS_GEN): $(LOCALBIN)
	test -s $(LOCALBIN)/crdoc || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" fybrik.io/crdoc@latest

APIDOCS_GEN = $(shell pwd)/bin/crdoc
apidocs-gen: ## Download crdoc locally if necessary.
	$(call go-install-tool,$(APIDOCS_GEN),fybrik.io/crdoc@latest)
.PHONY: envtest
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
$(ENVTEST): $(LOCALBIN)
	test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) CGO_ENABLED=0 go install -ldflags="-s -w" sigs.k8s.io/controller-runtime/tools/setup-envtest@$(ENVTEST_VERSION)

##@ Development

manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
	$(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
rbac: controller-gen yq
	$(CONTROLLER_GEN) rbac:roleName=manager-role paths="./..." output:stdout | $(YQ) '.rules' > ./charts/kamaji/controller-gen/clusterrole.yaml

webhook: controller-gen yq
	$(CONTROLLER_GEN) webhook paths="./..." output:stdout | $(YQ) 'select(documentIndex == 0) | .webhooks' > ./charts/kamaji/controller-gen/mutating-webhook.yaml
	$(CONTROLLER_GEN) webhook paths="./..." output:stdout | $(YQ) 'select(documentIndex == 1) | .webhooks' > ./charts/kamaji/controller-gen/validating-webhook.yaml
	$(YQ) -i 'map(.clientConfig.service.name |= "{{ include \"kamaji.webhookServiceName\" . }}")' ./charts/kamaji/controller-gen/mutating-webhook.yaml
	$(YQ) -i 'map(.clientConfig.service.namespace |= "{{ .Release.Namespace }}")' ./charts/kamaji/controller-gen/mutating-webhook.yaml
	$(YQ) -i 'map(.clientConfig.service.name |= "{{ include \"kamaji.webhookServiceName\" . }}")' ./charts/kamaji/controller-gen/validating-webhook.yaml
	$(YQ) -i 'map(.clientConfig.service.namespace |= "{{ .Release.Namespace }}")' ./charts/kamaji/controller-gen/validating-webhook.yaml

crds: controller-gen yq
	# kamaji chart
	$(CONTROLLER_GEN) crd webhook paths="./..." output:stdout | $(YQ) 'select(documentIndex == 0)' > ./charts/kamaji/crds/kamaji.clastix.io_datastores.yaml
	$(CONTROLLER_GEN) crd webhook paths="./..." output:stdout | $(YQ) 'select(documentIndex == 1)' > ./charts/kamaji/crds/kamaji.clastix.io_kubeconfiggenerators.yaml
	$(CONTROLLER_GEN) crd webhook paths="./..." output:stdout | $(YQ) 'select(documentIndex == 2)' > ./charts/kamaji/crds/kamaji.clastix.io_tenantcontrolplanes.yaml
	$(YQ) -i '. *n load("./charts/kamaji/controller-gen/crd-conversion.yaml")' ./charts/kamaji/crds/kamaji.clastix.io_tenantcontrolplanes.yaml
	# kamaji-crds chart
	cp ./charts/kamaji/controller-gen/crd-conversion.yaml ./charts/kamaji-crds/hack/crd-conversion.yaml
	$(YQ) '.spec' ./charts/kamaji/crds/kamaji.clastix.io_datastores.yaml > ./charts/kamaji-crds/hack/kamaji.clastix.io_datastores_spec.yaml
	$(YQ) '.spec' ./charts/kamaji/crds/kamaji.clastix.io_tenantcontrolplanes.yaml > ./charts/kamaji-crds/hack/kamaji.clastix.io_tenantcontrolplanes_spec.yaml
	$(YQ) '.spec' ./charts/kamaji/crds/kamaji.clastix.io_kubeconfiggenerators.yaml > ./charts/kamaji-crds/hack/kamaji.clastix.io_kubeconfiggenerators_spec.yaml
	$(YQ) -i '.conversion.webhook.clientConfig.service.name = "{{ .Values.kamajiService }}"' ./charts/kamaji-crds/hack/kamaji.clastix.io_tenantcontrolplanes_spec.yaml
	$(YQ) -i '.conversion.webhook.clientConfig.service.namespace = "{{ .Values.kamajiNamespace }}"' ./charts/kamaji-crds/hack/kamaji.clastix.io_tenantcontrolplanes_spec.yaml

manifests: rbac webhook crds ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.

generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
	$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."

@@ -110,8 +150,13 @@ generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and
golint: golangci-lint ## Linting the code according to the styling guide.
	$(GOLANGCI_LINT) run -c .golangci.yml

test:
	go test ./... -coverprofile cover.out
## Run unit tests (all tests except E2E).
.PHONY: test
test: envtest ginkgo
	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" $(GINKGO) -r -v -coverprofile cover.out --trace \
		./api/... \
		./cmd/... \
		./internal/... \

_datastore-mysql:
	$(MAKE) NAME=$(NAME) -C deploy/kine/mysql mariadb
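The `webhook` and `crds` recipes above split controller-gen's multi-document YAML output with yq's `documentIndex` operator. A minimal sketch of that selection, using a made-up two-document file:

```bash
# Sketch only: controller-gen emits several YAML documents separated by '---';
# documentIndex (alias: di) is 0-based, so 'documentIndex == 1' is the second one.
cat > /tmp/webhooks.yaml <<'EOF'
webhooks:
  - name: mutating.example
---
webhooks:
  - name: validating.example
EOF

yq 'select(documentIndex == 0) | .webhooks' /tmp/webhooks.yaml   # - name: mutating.example
yq 'select(documentIndex == 1) | .webhooks' /tmp/webhooks.yaml   # - name: validating.example
```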
@@ -132,170 +177,118 @@ datastore-postgres:
	$(MAKE) NAME=gold _datastore-postgres

_datastore-etcd:
	$(HELM) upgrade --install etcd-$(NAME) clastix/kamaji-etcd --create-namespace -n etcd-system --set datastore.enabled=true
	$(HELM) upgrade --install etcd-$(NAME) clastix/kamaji-etcd --create-namespace -n $(NAMESPACE) --set datastore.enabled=true --set fullnameOverride=etcd-$(NAME) $(EXTRA_ARGS)

_datastore-nats:
	$(MAKE) NAME=$(NAME) NAMESPACE=nats-system -C deploy/kine/nats nats
	kubectl apply -f $(shell pwd)/config/samples/kamaji_v1alpha1_datastore_nats_$(NAME).yaml

datastore-etcd: helm
	$(HELM) repo add clastix https://clastix.github.io/charts
	$(HELM) repo update
	$(MAKE) NAME=bronze _datastore-etcd
	$(MAKE) NAME=silver _datastore-etcd
	$(MAKE) NAME=gold _datastore-etcd
	$(MAKE) NAME=bronze NAMESPACE=etcd-system _datastore-etcd
	$(MAKE) NAME=silver NAMESPACE=etcd-system _datastore-etcd
	$(MAKE) NAME=gold NAMESPACE=etcd-system _datastore-etcd
	$(MAKE) NAME=primary NAMESPACE=kamaji-system EXTRA_ARGS='--set certManager.enabled=true --set certManager.issuerRef.kind=Issuer --set certManager.issuerRef.name=kamaji-selfsigned-issuer --set selfSignedCertificates.enabled=false' _datastore-etcd
	$(MAKE) NAME=secondary NAMESPACE=kamaji-system EXTRA_ARGS='--set certManager.enabled=true --set certManager.ca.create=false --set certManager.ca.nameOverride=etcd-primary-ca --set certManager.issuerRef.kind=Issuer --set certManager.issuerRef.name=kamaji-selfsigned-issuer --set selfSignedCertificates.enabled=false' _datastore-etcd

datastores: datastore-mysql datastore-etcd datastore-postgres ## Install all Kamaji DataStores with multiple drivers, and different tiers.
datastore-nats: helm
	$(HELM) repo add nats https://nats-io.github.io/k8s/helm/charts/
	$(HELM) repo update
	$(MAKE) NAME=bronze _datastore-nats
	$(MAKE) NAME=silver _datastore-nats
	$(MAKE) NAME=gold _datastore-nats
	$(MAKE) NAME=notls _datastore-nats

datastores: datastore-mysql datastore-etcd datastore-postgres datastore-nats ## Install all Kamaji DataStores with multiple drivers, and different tiers.

##@ Build

# Get information about git current status
GIT_HEAD_COMMIT ?= $$(git rev-parse --short HEAD)
GIT_TAG_COMMIT ?= $$(git rev-parse --short v$(VERSION))
GIT_TAG_COMMIT ?= $$(git rev-parse --short $(VERSION))
GIT_MODIFIED_1 ?= $$(git diff $(GIT_HEAD_COMMIT) $(GIT_TAG_COMMIT) --quiet && echo "" || echo ".dev")
GIT_MODIFIED_2 ?= $$(git diff --quiet && echo "" || echo ".dirty")
GIT_MODIFIED ?= $$(echo "$(GIT_MODIFIED_1)$(GIT_MODIFIED_2)")
GIT_REPO ?= $$(git config --get remote.origin.url)
BUILD_DATE ?= $$(git log -1 --format="%at" | xargs -I{} date -d @{} +%Y-%m-%dT%H:%M:%S)

build: generate fmt vet ## Build manager binary.
	go build -o bin/manager main.go
LD_FLAGS ?= "-X github.com/clastix/kamaji/internal.GitCommit=$(GIT_HEAD_COMMIT) \
  -X github.com/clastix/kamaji/internal.GitTag=$(VERSION) \
  -X github.com/clastix/kamaji/internal.GitDirty=$(GIT_MODIFIED) \
  -X github.com/clastix/kamaji/internal.BuildTime=$(BUILD_DATE) \
  -X github.com/clastix/kamaji/internal.GitRepo=$(GIT_REPO)"

run: manifests generate fmt vet ## Run a controller from your host.
KO_PUSH ?= false
KO_LOCAL ?= true

run: manifests generate ## Run a controller from your host.
	go run ./main.go

docker-build: ## Build docker image with the manager.
	docker build -t ${IMG} . --build-arg GIT_HEAD_COMMIT=$(GIT_HEAD_COMMIT) \
		--build-arg GIT_TAG_COMMIT=$(GIT_TAG_COMMIT) \
		--build-arg GIT_MODIFIED=$(GIT_MODIFIED) \
		--build-arg GIT_REPO=$(GIT_REPO) \
		--build-arg GIT_LAST_TAG=$(VERSION) \
		--build-arg BUILD_DATE=$(BUILD_DATE)
build: $(KO)
	LD_FLAGS=$(LD_FLAGS) \
	KOCACHE=/tmp/ko-cache KO_DOCKER_REPO=${CONTAINER_REPOSITORY} \
	$(KO) build ./ --bare --tags=$(VERSION) --local=$(KO_LOCAL) --push=$(KO_PUSH)

docker-push: ## Push docker image with the manager.
	docker push ${IMG}
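The `build` target above swaps the old `docker build` flow for ko: LD_FLAGS is exported into the environment and picked up by the `.ko.yaml` ldflags template shown earlier, while KO_LOCAL/KO_PUSH decide whether the image stays in the local Docker daemon or is pushed. A hedged sketch of the equivalent manual invocation, with placeholder values:

```bash
# Roughly what `make build` runs; repository, tag and flags are illustrative.
export LD_FLAGS='-X github.com/clastix/kamaji/internal.GitTag=v0.0.0-example'   # consumed by .ko.yaml: {{ .Env.LD_FLAGS }}
export KOCACHE=/tmp/ko-cache
export KO_DOCKER_REPO=docker.io/clastix/kamaji    # CONTAINER_REPOSITORY in the Makefile

# The Makefile defaults build locally without pushing:
./bin/ko build ./ --bare --tags=v0.0.0-example --local=true --push=false

# CI flips the switches instead, e.g.:
#   make VERSION=edge-25.1.4 KO_LOCAL=false KO_PUSH=true build
```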
##@ Deployment
##@ Development

metallb:
	kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.7/config/manifests/metallb-native.yaml
	kubectl apply -f https://kind.sigs.k8s.io/examples/loadbalancer/metallb-config.yaml
	echo ""
	docker network inspect -f '{{.IPAM.Config}}' kind
	kubectl apply -f "https://raw.githubusercontent.com/metallb/metallb/$$(curl "https://api.github.com/repos/metallb/metallb/releases/latest" | jq -r ".tag_name")/config/manifests/metallb-native.yaml"
	kubectl wait pods -n metallb-system -l app=metallb,component=controller --for=condition=Ready --timeout=10m
	kubectl wait pods -n metallb-system -l app=metallb,component=speaker --for=condition=Ready --timeout=2m
	@IPV4_PREFIX=$$(docker network inspect kind \
		-f '{{range .IPAM.Config}}{{println .Subnet " " .Gateway}}{{end}}' \
		| grep -v ':' \
		| awk '{print $$2}' \
		| sed -E 's|^([0-9]+\.[0-9]+)\..*$$|\1|'); \
	sed -E "s|172\.19|$$IPV4_PREFIX|g" hack/metallb.yaml | kubectl apply -f -
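The `IPV4_PREFIX` pipeline in the `metallb` recipe above extracts the first two octets of the kind Docker network so the address pool in `hack/metallb.yaml` matches the local environment. A worked example against made-up `docker network inspect` output:

```bash
# Hypothetical output of the inspect template for the "kind" network:
#   fc00:f853:ccd:e793::/64   fc00:f853:ccd:e793::1
#   172.18.0.0/16   172.18.0.1
#
# grep -v ':'                            drops the IPv6 line
# awk '{print $2}'                       keeps the IPv4 gateway, 172.18.0.1
# sed -E 's|^([0-9]+\.[0-9]+)\..*$|\1|'  keeps the first two octets, 172.18
IPV4_PREFIX=172.18

# The recipe then rewrites the 172.19 placeholder in hack/metallb.yaml:
sed -E "s|172\.19|$IPV4_PREFIX|g" hack/metallb.yaml | kubectl apply -f -
```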
cert-manager:
	kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.10.1/cert-manager.yaml
	$(HELM) repo add jetstack https://charts.jetstack.io
	$(HELM) upgrade --install cert-manager jetstack/cert-manager --namespace certmanager-system --create-namespace --set "installCRDs=true"

dev: generate manifests uninstall install rbac ## Full installation for development purposes
	go fmt ./...
gateway-api:
	kubectl apply --server-side -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.0/standard-install.yaml
	# Required for the TLSRoutes. Experimentals.
	kubectl apply --server-side -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.0/experimental-install.yaml
	kubectl wait --for=condition=Established crd/gateways.gateway.networking.k8s.io --timeout=60s

load: docker-build kind
	$(KIND) load docker-image --name kamaji ${IMG}
envoy-gateway: gateway-api helm ## Install Envoy Gateway for Gateway API tests.
	$(HELM) upgrade --install eg oci://docker.io/envoyproxy/gateway-helm --version v1.6.1 -n envoy-gateway-system --create-namespace
	kubectl wait --timeout=5m -n envoy-gateway-system deployment/envoy-gateway --for=condition=Available

rbac: manifests kustomize ## Install RBAC into the K8s cluster specified in ~/.kube/config.
	$(KUSTOMIZE) build config/rbac | kubectl apply -f -

install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
	$(KUSTOMIZE) build config/crd | kubectl apply -f -

uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config.
	$(KUSTOMIZE) build config/crd | kubectl delete -f -

deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
	$(KUSTOMIZE) build config/default | kubectl apply -f -

undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config.
	$(KUSTOMIZE) build config/default | kubectl delete -f -

yaml-installation-file: manifests kustomize ## Create yaml installation file
	$(KUSTOMIZE) build config/default > config/install.yaml

.PHONY: bundle
bundle: manifests kustomize ## Generate bundle manifests and metadata, then validate generated files.
	operator-sdk generate kustomize manifests -q
	cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
	$(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS)
	operator-sdk bundle validate ./bundle

.PHONY: bundle-build
bundle-build: ## Build the bundle image.
	docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) .

.PHONY: bundle-push
bundle-push: ## Push the bundle image.
	$(MAKE) docker-push IMG=$(BUNDLE_IMG)

.PHONY: opm
OPM = ./bin/opm
opm: ## Download opm locally if necessary.
ifeq (,$(wildcard $(OPM)))
ifeq (,$(shell which opm 2>/dev/null))
	@{ \
	set -e ;\
	mkdir -p $(dir $(OPM)) ;\
	OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
	curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.15.1/$${OS}-$${ARCH}-opm ;\
	chmod +x $(OPM) ;\
	}
else
OPM = $(shell which opm)
endif
endif

# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0).
# These images MUST exist in a registry and be pull-able.
BUNDLE_IMGS ?= $(BUNDLE_IMG)

# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0).
CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION)

# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image.
ifneq ($(origin CATALOG_BASE_IMG), undefined)
FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG)
endif

# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'.
# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see:
# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator
.PHONY: catalog-build
catalog-build: opm ## Build a catalog image.
	$(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)

# Push the catalog image.
.PHONY: catalog-push
catalog-push: ## Push a catalog image.
	$(MAKE) docker-push IMG=$(CATALOG_IMG)

define install-kustomize
	@[ -f $(1) ] || { \
	set -e ;\
	echo "Installing v$(2)" ;\
	cd bin ;\
	wget "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" ;\
	bash ./install_kustomize.sh $(2) ;\
	}
endef

# go-install-tool will 'go install' any package $2 and install it to $1.
PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
define go-install-tool
	@[ -f $(1) ] || { \
	set -e ;\
	echo "Installing $(2)" ;\
	GOBIN=$(PROJECT_DIR)/bin go install $(2) ;\
	}
endef

.PHONY: env
env:
	@make -C deploy/kind kind ingress-nginx
load: kind
	$(KIND) load docker-image --name kamaji ${CONTAINER_REPOSITORY}:${VERSION}

##@ e2e
||||
|
||||
.PHONY: env
|
||||
env: kind
|
||||
$(KIND) create cluster --name kamaji
|
||||
|
||||
cleanup: kind
|
||||
$(KIND) delete cluster --name kamaji
|
||||
|
||||
.PHONY: e2e
|
||||
e2e: env load helm ginkgo cert-manager ## Create a KinD cluster, install Kamaji on it and run the test suite.
|
||||
$(HELM) upgrade --debug --install kamaji ./charts/kamaji --create-namespace --namespace kamaji-system --set "image.pullPolicy=Never"
|
||||
e2e: env build load helm ginkgo cert-manager gateway-api envoy-gateway ## Create a KinD cluster, install Kamaji on it and run the test suite.
|
||||
$(HELM) upgrade --debug --install kamaji-crds ./charts/kamaji-crds --create-namespace --namespace kamaji-system
|
||||
$(HELM) repo add clastix https://clastix.github.io/charts
|
||||
$(HELM) dependency build ./charts/kamaji
|
||||
$(HELM) upgrade --debug --install kamaji ./charts/kamaji --create-namespace --namespace kamaji-system --set "image.tag=$(VERSION)" --set "image.pullPolicy=Never" --set "telemetry.disabled=true"
|
||||
$(MAKE) datastores
|
||||
$(GINKGO) -v ./e2e
|
||||
|
||||
##@ Document
|
||||
|
||||
CAPI_URL = https://github.com/clastix/cluster-api-control-plane-provider-kamaji.git
|
||||
CAPI_DIR := $(shell mktemp -d)
|
||||
CRDS_DIR := $(shell mktemp -d)
|
||||
|
||||
.PHONY: apidoc
|
||||
apidoc: apidocs-gen
|
||||
$(APIDOCS_GEN) crdoc --resources config/crd/bases --output docs/content/reference/api.md --template docs/templates/reference-cr.tmpl
|
||||
@cp charts/kamaji/crds/*.yaml $(CRDS_DIR)
|
||||
@git clone $(CAPI_URL) $(CAPI_DIR)
|
||||
@cp $(CAPI_DIR)/config/crd/bases/*.yaml $(CRDS_DIR)
|
||||
@rm -rf $(CAPI_DIR)
|
||||
$(APIDOCS_GEN) crdoc --resources $(CRDS_DIR) --output docs/content/reference/api.md --template docs/templates/reference-cr.tmpl
|
||||
@rm -rf $(CRDS_DIR)
|
||||
|
||||
NOTICE (new file)
@@ -0,0 +1,15 @@
Kamaji — The Kubernetes Control Plane Manager: copyright 2022 Clastix Labs
Licensed under the Apache License, Version 2.0: https://kamaji.clastix.io

This product includes software developed by Clastix Labs and the Kamaji open-source community under the Apache License, Version 2.0.

Kamaji powers Kubernetes Control Planes at scale for companies worldwide.

We encourage all commercial products and services using Kamaji to acknowledge this publicly and join our growing ecosystem of adopters.

You can support the Kamaji community by:
- Listing Kamaji in your product's "Open Source Credits" or similar section
- Adding your organization to the Adopters list on GitHub: https://github.com/clastix/kamaji/blob/master/ADOPTERS.md
- Mentioning Kamaji on your company or product website

Public acknowledgement strengthens the open-source ecosystem and helps ensure the sustainability of the project you rely on.
PROJECT
@@ -7,6 +7,15 @@ plugins:
|
||||
projectName: operator
|
||||
repo: github.com/clastix/kamaji
|
||||
resources:
|
||||
- api:
|
||||
crdVersion: v1
|
||||
namespaced: false
|
||||
controller: true
|
||||
domain: clastix.io
|
||||
group: kamaji
|
||||
kind: KubeconfigGenerator
|
||||
path: github.com/clastix/kamaji/api/v1alpha1
|
||||
version: v1alpha1
|
||||
- api:
|
||||
crdVersion: v1
|
||||
namespaced: true
|
||||
@@ -16,10 +25,6 @@ resources:
|
||||
kind: TenantControlPlane
|
||||
path: github.com/clastix/kamaji/api/v1alpha1
|
||||
version: v1alpha1
|
||||
webhooks:
|
||||
defaulting: true
|
||||
validation: true
|
||||
webhookVersion: v1
|
||||
- api:
|
||||
crdVersion: v1
|
||||
domain: clastix.io
|
||||
@@ -27,8 +32,4 @@ resources:
|
||||
kind: DataStore
|
||||
path: github.com/clastix/kamaji/api/v1alpha1
|
||||
version: v1alpha1
|
||||
webhooks:
|
||||
defaulting: true
|
||||
validation: true
|
||||
webhookVersion: v1
|
||||
version: "3"
|
||||
|
||||
README.md
@@ -3,64 +3,161 @@
|
||||
<p align="left">
|
||||
<img src="https://img.shields.io/github/license/clastix/kamaji"/>
|
||||
<img src="https://img.shields.io/github/go-mod/go-version/clastix/kamaji"/>
|
||||
<a href="https://github.com/clastix/kamaji/releases">
|
||||
<img src="https://img.shields.io/github/v/release/clastix/kamaji"/>
|
||||
</a>
|
||||
<a href="https://github.com/clastix/kamaji/releases"><img src="https://img.shields.io/github/v/release/clastix/kamaji"/></a>
|
||||
<img src="https://goreportcard.com/badge/github.com/clastix/kamaji">
|
||||
<a href="https://kubernetes.slack.com/archives/C03GLTTMWNN"><img alt="#kamaji on Kubernetes Slack" src="https://img.shields.io/badge/slack-@kubernetes/kamaji-blue.svg?logo=slack"/></a>
|
||||
</p>
|
||||
|
||||
**Kamaji** deploys and operates **Kubernetes** at scale with a fraction of the operational burden.
|
||||

|
||||

|
||||
|
||||
<p align="center" style="padding: 6px 6px">
|
||||
<img src="assets/kamaji-logo.png" />
|
||||
</p>
|
||||
### 🤔 What is Kamaji?
|
||||
|
||||
## Why are we building it?
Global hyper-scalers are leading the Managed Kubernetes space, while other cloud providers, as well as large corporations, are struggling to offer the same experience to their DevOps teams because they lack the right tools. Also, current Kubernetes solutions are mainly designed with an enterprise-first approach and are too costly when deployed at scale.
**Kamaji** is the **Kubernetes Control Plane Manager**, leveraging the concept of the [**Hosted Control Plane**](https://clastix.io/post/the-raise-of-hosted-control-plane-in-kubernetes/).

**Kamaji** aims to solve these pains by leveraging multi-tenancy and simplifying how to run multiple control planes on the same infrastructure with a fraction of the operational burden.
Kamaji's approach is based on running the Kubernetes Control Plane components in Pods instead of dedicated machines.
This allows operating Kubernetes clusters at scale with a fraction of the operational burden.
Thanks to this approach, running multiple Control Planes becomes cheaper and easier to deploy and operate.
|
||||
|
||||
## How it works
|
||||
Kamaji turns any Kubernetes cluster into an _“admin cluster”_ to orchestrate other Kubernetes clusters called _“tenant clusters”_. Kamaji is special because the Control Planes of _“tenant clusters”_ are just regular pods instead of dedicated Virtual Machines. This solution makes running Control Planes at scale cheaper and easier to deploy and operate.
|
||||
_Kamaji is like a fleet of Site Reliability Engineers with expertise codified into its logic, working 24/7 to keep up and running your Control Planes._
|
||||
|
||||

|
||||

|
||||
<img src="docs/content/images/architecture.png" width="600" style="display: block; margin: 0 auto">
|
||||
|
||||
## Getting started
|
||||
### 📖 How it works
|
||||
|
||||
Please refer to the [Getting Started guide](https://kamaji.clastix.io/getting-started/) to deploy a minimal setup of Kamaji on KinD.
|
||||
Kamaji is extending the Kubernetes API capabilities thanks to [Custom Resource Definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#customresourcedefinitions).
|
||||
|
||||
## Features
|
||||
By installing Kamaji, two new APIs will be available:

- **Self Service Kubernetes:** give users the freedom to self-provision their Kubernetes clusters according to the assigned boundaries.
- **Multi-cluster Management:** centrally manage multiple tenant clusters from a single admin cluster. Happy SREs.
- **Cheaper Control Planes:** place multiple tenant control planes on a single node, instead of having three nodes for a single control plane.
- **Stronger Multi-Tenancy:** let tenants access the control plane with admin permissions while keeping them isolated at the infrastructure level.
- **Kubernetes Inception:** use Kubernetes to manage Kubernetes by re-using all the Kubernetes goodies you already know and love.
- **Full API compliance:** tenant clusters are fully CNCF compliant, built with upstream Kubernetes binaries. A user does not see differences between a Kamaji-provisioned cluster and a dedicated cluster.
|
||||
- `TenantControlPlane`, the instance definition of your desired Kubernetes Control Plane
|
||||
- `Datastore`, the backing store used by one (or more) `TenantControlPlane`
|
||||
|
||||
## Roadmap
|
||||
The `TenantControlPlane` (short-named `tcp`) objects are Namespace-scoped and allow configuring every aspect of your desired Control Plane.
Besides the Kubernetes configuration values, you can specify Pod options such as limits, requests, tolerations, node selectors, etc.,
as well as how these should be exposed (e.g.: using a `ClusterIP`, a `LoadBalancer`, or a `NodePort`).
|
||||
|
||||
- [x] Benchmarking
|
||||
- [ ] Stress-test
|
||||
- [x] Support for dynamic address allocation on native Load Balancer
|
||||
The `TenantControlPlane` is the stateless definition of the Control Plane, allowing you to set up the required components for a full-fledged Kubernetes cluster.
|
||||
The state is managed by the `Datastore` API, a cluster-scoped resource which can hold the data of one or more Kubernetes clusters.
|
||||
|
||||
> For further information about the API specifications and all the available options,
|
||||
> refer to the official [API reference](https://kamaji.clastix.io/reference/api/#tenantcontrolplane).
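
To make this concrete, below is a minimal `TenantControlPlane` sketch. The field names follow the project's published examples rather than this page, so treat them as assumptions and rely on the API reference above for the authoritative schema.

```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: TenantControlPlane
metadata:
  name: tenant-00
  namespace: default
spec:
  dataStoreName: default         # name of the cluster-scoped DataStore (field name assumed)
  controlPlane:
    deployment:
      replicas: 2                # Pod options (replicas, resources, tolerations, ...) live here
    service:
      serviceType: LoadBalancer  # or ClusterIP / NodePort
  kubernetes:
    version: v1.29.0
  addons:
    coreDNS: {}                  # managed core addons, reconciled automatically
    kubeProxy: {}
    konnectivity: {}
```

Once applied in a Namespace of the admin cluster, Kamaji reconciles the corresponding Control Plane pods and exposes them through the chosen Service type.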
|
||||
|
||||
### ⭐️ Main features
|
||||
|
||||
- **Fast provisioning time**: depending on the infrastructure, Tenant Control Planes are up and ready to serve traffic in **16 seconds**.
- **Streamlined update**: the rollout to a new Kubernetes version for a given Tenant Control Plane takes just **10 seconds**, with a Blue/Green deployment to avoid serving mixed Kubernetes versions.
- **Resource optimization**: thanks to the Datastore decoupling, there's no need for an odd number of instances (e.g.: RAFT consensus), saving up to 60% of HW resources.
- **Scale from zero to the moon**: scale instances down when there's no usage, or automatically scale up to absorb traffic spikes, reusing the usual Kubernetes patterns.
- **Declarative approach, constant reconciliation**: thanks to the Operator pattern, drift detection happens in real-time, maintaining the desired state.
- **Automated certificates management**: Kamaji leverages `kubeadm`, and certificates are automatically created and rotated for you.
- **Managing core addons**: Kamaji automatically configures `kube-proxy`, `CoreDNS`, and `konnectivity`, with automatic remediation in case of user errors (e.g.: deleting the `CoreDNS` deployment).
- **Auto Healing**: the `TenantControlPlane` objects in the management cluster are tracked by Kamaji: in case of deletion, everything is recreated in an idempotent way.
- **Datastore multi-tenancy**: optionally, Kamaji allows running multiple Control Planes on the same _Datastore_ instance, leveraging the multi-tenancy of each driver, decreasing operations and optimizing costs.
- **Overcoming `etcd` limitations**: optionally, Kamaji allows using a different _Datastore_ thanks to [`kine`](https://github.com/k3s-io/kine), supporting `MySQL`, `PostgreSQL`, or `NATS` as alternatives (a minimal `DataStore` sketch follows this list).
- **Simplifying mixed-networks setup**: thanks to [`Konnectivity`](https://kubernetes.io/docs/tasks/extend-kubernetes/setup-konnectivity/),
the Tenant Control Plane can reach worker nodes hosted in a different network, overcoming the lack of direct reachability when nodes have a non-routable IP address
(e.g.: worker nodes in a different infrastructure).
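
As referenced in the list above, here is a minimal, illustrative `DataStore` sketch for a non-`etcd` driver. It is grounded in the `DataStoreSpec` changes shown later in this diff (driver enum, `endpoints`, optional `basicAuth`/`tlsConfig`), but the Secret layout (names, namespace, `keyPath`) is an assumption.

```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
  name: postgres-default            # DataStore is a cluster-scoped resource
spec:
  driver: PostgreSQL                # one of: etcd, MySQL, PostgreSQL, NATS
  endpoints:
    - postgres-rw.datastores.svc:5432
  basicAuth:                        # for non-etcd drivers, either basicAuth or tlsConfig is required
    username:
      secretReference:              # alternatively, an inline `content` value can be used
        name: postgres-credentials  # assumed Secret name
        namespace: datastores
        keyPath: username
    password:
      secretReference:
        name: postgres-credentials
        namespace: datastores
        keyPath: password
```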
|
||||
|
||||
### 🚀 Use cases
|
||||
|
||||
- [**Creating a private Managed Kubernetes Service**](https://clastix.io/post/netsons-builds-a-managed-kubernetes-service-with-kamaji-and-open-stack/)
|
||||
- [**Building a Platform as a Service**](https://aenix.io/cozystack/)
|
||||
- [**Overcoming public Managed Kubernetes Services**](https://clastix.io/post/overcoming-eks-limitations-with-kamaji-on-aws/) such as EKS
|
||||
- [**Hybrid infrastructures**](https://clastix.io/post/bridging-the-gap-hybrid-kubernetes-clusters-with-remote-control-planes/):
|
||||
host the Control Plane on the Cloud and worker nodes on prem or vice-versa, according to your needs.
|
||||
- [**Kubernetes at the edge**](https://clastix.io/post/edgevolution-unleashing-the-power-of-kubernetes-clusters-for-a-revolutionary-edge-computing-experience/):
|
||||
take full advantage of the _Kubernetes API Server as a service_ paradigm.
|
||||
- **Kubernetes Control Plane as a Service:** centrally manage multiple Kubernetes clusters from a single management point (_Multi-Cluster management_).
|
||||
- **High-density Control Plane:** place multiple control planes on the same infrastructure, instead of having dedicated machines for each control plane.
|
||||
- **Strong Multi-tenancy:** let users access the control plane with admin permissions while keeping them isolated at the infrastructure level.
- **Kubernetes Inception:** use Kubernetes to manage Kubernetes with automation, high-availability, fault tolerance, and autoscaling out of the box.
- **Bring Your Own Device:** keep the control plane isolated from the data plane. Worker nodes can join and run consistently from everywhere: cloud, edge, and data-center.
- **Fully CNCF compliant:** all clusters are built with upstream Kubernetes binaries, resulting in fully CNCF-compliant Kubernetes clusters.
|
||||
|
||||
> 🤔 You'd like to do the same but don't know how?
|
||||
> 💡 [CLASTIX](https://clastix.io/) can help you with your needs!
|
||||
|
||||
### 🧑💻 Production grade
|
||||
|
||||
Kamaji is empowering several businesses, and it counts public adopters.
|
||||
Check out the [adopters](./ADOPTERS.md) file to learn more.
|
||||
|
||||
> 🤗 If you're using Kamaji, share your love by opening a PR!
|
||||
|
||||
### 🍦 Vanilla Kubernetes clusters
|
||||
|
||||
Kamaji is **not** yet-another-Kubernetes-distribution: you have full freedom over the technology stack you provide to end users.
Kamaji is a perfect fit for Platform Engineering, hiding the complexity of Control Plane management from developers and DevOps engineers.
|
||||
|
||||
The provided Kubernetes Control Planes are [CNCF compliant clusters](https://kamaji.clastix.io/reference/conformance/).
|
||||
|
||||
<img src="https://raw.githubusercontent.com/cncf/artwork/master/projects/kubernetes/certified-kubernetes/versionless/color/certified-kubernetes-color.png" style="display: block; width: 75px; margin: 0 auto">
|
||||
|
||||
### 🐢 Cluster API support
|
||||
|
||||
Kamaji is **not** a [Cluster API](https://cluster-api.sigs.k8s.io/) replacement; rather, it plays very well with it.

Since Kamaji focuses solely on the Control Plane, a dedicated [Cluster API Control Plane provider for Kamaji](https://github.com/clastix/cluster-api-control-plane-provider-kamaji) has been developed.
|
||||
|
||||
### 🛣️ Roadmap
|
||||
|
||||
- [x] Dynamic address on Load Balancer
|
||||
- [x] Zero Downtime Tenant Control Plane upgrade
|
||||
- [x] `konnectivity` integration
|
||||
- [ ] Provisioning of Tenant Control Plane through Cluster APIs
|
||||
- [x] [Join worker nodes from anywhere thanks to Konnectivity](https://kamaji.clastix.io/concepts/#konnectivity)
|
||||
- [x] [Alternative datastore MySQL, PostgreSQL, NATS](https://kamaji.clastix.io/guides/alternative-datastore/)
|
||||
- [x] [Pool of multiple datastores](https://kamaji.clastix.io/concepts/#datastores)
|
||||
- [x] [Seamless migration between datastores](https://kamaji.clastix.io/guides/datastore-migration/)
|
||||
- [ ] Automatic assignment to a datastore
|
||||
- [ ] Autoscaling of Tenant Control Plane
|
||||
- [x] [Provisioning through Cluster APIs](https://github.com/clastix/cluster-api-control-plane-provider-kamaji)
|
||||
- [ ] Terraform provider
|
||||
- [ ] Custom Prometheus metrics for monitoring and alerting
|
||||
- [x] `kine` integration for MySQL as datastore
|
||||
- [x] `kine` integration for PostgreSQL as datastore
|
||||
- [x] Pool of multiple datastores
|
||||
- [x] Seamless migration between datastores with the same driver
|
||||
- [ ] Automatic assignment of Tenant Control Planes to a datastore
|
||||
- [ ] Autoscaling of Tenant Control Plane pods
|
||||
- [ ] Custom Prometheus metrics
|
||||
|
||||
### 🎥 Multimedia
|
||||
|
||||
## Documentation
|
||||
Please check the project's [documentation](https://kamaji.clastix.io/) to get started with Kamaji.
|
||||
- Playlist ▶️ [Tutorials and How-Tos by Dario Tranchitella, CLASTIX](https://www.youtube.com/playlist?list=PLjiUjoV4Ws_3pNsUpTXI-KKk731nD2MQY)
|
||||
- YouTube ▶️ [Metal³ provisioning with Kamaji Hosted Control Planes by Huy Mai, Ericsson](https://youtu.be/u9sbURj6jXY?t=10536)
|
||||
- YouTube ▶️ [Hands-on introduction to Kamaji](https://www.youtube.com/watch?v=HhevxwQWQ88)
|
||||
- YouTube ▶️ [Scaling Kubernetes up to 1,000 Control Planes](https://www.youtube.com/watch?v=W_HXRXJh96U)
|
||||
- YouTube ▶️ [Equinix, Kamaji, and Cluster API](https://www.youtube.com/watch?v=TLBTqROj_wA)
|
||||
- YouTube ▶️ [Rancher & Kamaji: solving multitenancy challenges in the Kubernetes world](https://www.youtube.com/watch?v=VXHNrMmlF8U)
|
||||
- YouTube ▶️ [Enabling Self-Service Kubernetes clusters with Kamaji and Paralus](https://www.youtube.com/watch?v=JWA2LwZazM0)
|
||||
- YouTube ▶️ [Hosted Control Plane on Kubernetes (HPC) with Kamaji and K0mostron by Hervé Leclerc, ALTER WAY](https://www.youtube.com/watch?v=vmRdE2ngn78)
|
||||
- Medium 📖 [Set up Virtual Control Planes with Kamaji on Minikube, by Ben Soer](https://medium.com/@bensoer/set-up-virtual-control-planes-with-kamaji-on-minikube-a540be0275aa)
|
||||
- Hands-On tutorial 📖 [How to build your own managed Kubernetes service on Hetzner Cloud, by Hans Jörg Wieland](https://wieland.tech/blog/kamaji-cluster-api-and-etcd)
|
||||
|
||||
## Contributions
|
||||
Kamaji is Open Source under the Apache 2.0 license, and any contribution is welcome.
|
||||
### 🏷️ Versioning
|
||||
|
||||
## Community
|
||||
Join the [Kubernetes Slack Workspace](https://slack.k8s.io/) and the [`#kamaji`](https://kubernetes.slack.com/archives/C03GLTTMWNN) channel to meet end-users and contributors.
|
||||
Versioning adheres to the [Semantic Versioning](http://semver.org/) principles.
|
||||
A full list of the available releases is available in the GitHub repository's [**Release** section](https://github.com/clastix/kamaji/releases).
|
||||
|
||||
### 📄 Documentation
|
||||
|
||||
Further documentation can be found on the official [Kamaji documentation website](https://kamaji.clastix.io/).
|
||||
|
||||
### 🤝 Contributions
|
||||
|
||||
Contributions are highly appreciated and very welcomed!
|
||||
|
||||
In case of bugs, please check whether the issue has already been reported in the [GitHub Issues](https://github.com/clastix/kamaji/issues) section.
If it hasn't, you can open a new one: a detailed report will help us replicate it, assess it, and work on a fix.

You can also express your intention to work on the fix yourself.
|
||||
The commit messages are checked according to the described [semantics](https://github.com/projectcapsule/capsule/blob/main/CONTRIBUTING.md#semantics).
|
||||
Commits are used to generate the changelog, and their author will be referenced in it.
|
||||
|
||||
In case of **✨ Feature Requests** please use the [Discussion's Feature Request section](https://github.com/clastix/kamaji/discussions/categories/feature-requests).
|
||||
|
||||
### 📝 License
|
||||
|
||||
Kamaji is licensed under Apache 2.0.
|
||||
The code is provided as-is with no warranties.
|
||||
|
||||
### 🛟 Commercial Support
|
||||
|
||||
 [CLASTIX](https://clastix.io/) is the commercial company behind Kamaji and the Cluster API Control Plane provider.
|
||||
|
||||
If you're looking to run Kamaji in production and would like to learn more, **CLASTIX** can help by offering [Open Source support plans](https://clastix.io/support),
|
||||
as well as providing a comprehensive Enterprise Platform named [CLASTIX Enterprise Platform](https://clastix.cloud/), built on top of the Kamaji and [Capsule](https://capsule.clastix.io/) projects (now donated to CNCF as a Sandbox project).
|
||||
|
||||
Feel free to get in touch with the provided [Contact form](https://clastix.io/contact).
|
||||
|
||||
@@ -1,57 +0,0 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
//+kubebuilder:webhook:path=/validate--v1-secret,mutating=false,failurePolicy=ignore,sideEffects=None,groups="",resources=secrets,verbs=delete,versions=v1,name=vdatastoresecrets.kb.io,admissionReviewVersions=v1
|
||||
|
||||
type dataStoreSecretValidator struct {
|
||||
log logr.Logger
|
||||
client client.Client
|
||||
}
|
||||
|
||||
func (d *dataStoreSecretValidator) ValidateCreate(context.Context, runtime.Object) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dataStoreSecretValidator) ValidateUpdate(context.Context, runtime.Object, runtime.Object) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dataStoreSecretValidator) ValidateDelete(ctx context.Context, obj runtime.Object) error {
|
||||
secret := obj.(*corev1.Secret) //nolint:forcetypeassert
|
||||
|
||||
dsList := &DataStoreList{}
|
||||
|
||||
if err := d.client.List(ctx, dsList, client.MatchingFieldsSelector{Selector: fields.OneTermEqualSelector(DatastoreUsedSecretNamespacedNameKey, fmt.Sprintf("%s/%s", secret.GetNamespace(), secret.GetName()))}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(dsList.Items) > 0 {
|
||||
var res []string
|
||||
|
||||
for _, ds := range dsList.Items {
|
||||
res = append(res, ds.GetName())
|
||||
}
|
||||
|
||||
return fmt.Errorf("the Secret is used by the following kamajiv1alpha1.DataStores and cannot be deleted (%s)", strings.Join(res, ", "))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dataStoreSecretValidator) Default(context.Context, runtime.Object) error {
|
||||
return nil
|
||||
}
|
||||
@@ -8,7 +8,8 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +kubebuilder:validation:Enum=etcd;MySQL;PostgreSQL
|
||||
//+kubebuilder:validation:Enum=etcd;MySQL;PostgreSQL;NATS
|
||||
//+kubebuilder:validation:XValidation:rule="self == oldSelf",message="Datastore driver is immutable"
|
||||
|
||||
type Driver string
|
||||
|
||||
@@ -16,13 +17,21 @@ var (
|
||||
EtcdDriver Driver = "etcd"
|
||||
KineMySQLDriver Driver = "MySQL"
|
||||
KinePostgreSQLDriver Driver = "PostgreSQL"
|
||||
KineNatsDriver Driver = "NATS"
|
||||
)
|
||||
|
||||
// +kubebuilder:validation:MinItems=1
|
||||
//+kubebuilder:validation:MinItems=1
|
||||
|
||||
type Endpoints []string
|
||||
|
||||
// DataStoreSpec defines the desired state of DataStore.
|
||||
// +kubebuilder:validation:XValidation:rule="(self.driver == \"etcd\") ? (self.tlsConfig != null && (has(self.tlsConfig.certificateAuthority.privateKey.secretReference) || has(self.tlsConfig.certificateAuthority.privateKey.content))) : true", message="certificateAuthority privateKey must have secretReference or content when driver is etcd"
|
||||
// +kubebuilder:validation:XValidation:rule="(self.driver == \"etcd\") ? (self.tlsConfig != null && (has(self.tlsConfig.clientCertificate.certificate.secretReference) || has(self.tlsConfig.clientCertificate.certificate.content))) : true", message="clientCertificate must have secretReference or content when driver is etcd"
|
||||
// +kubebuilder:validation:XValidation:rule="(self.driver == \"etcd\") ? (self.tlsConfig != null && (has(self.tlsConfig.clientCertificate.privateKey.secretReference) || has(self.tlsConfig.clientCertificate.privateKey.content))) : true", message="clientCertificate privateKey must have secretReference or content when driver is etcd"
|
||||
// +kubebuilder:validation:XValidation:rule="(self.driver != \"etcd\" && has(self.tlsConfig) && has(self.tlsConfig.clientCertificate)) ? (((has(self.tlsConfig.clientCertificate.certificate.secretReference) || has(self.tlsConfig.clientCertificate.certificate.content)))) : true", message="When driver is not etcd and tlsConfig exists, clientCertificate must be null or contain valid content"
|
||||
// +kubebuilder:validation:XValidation:rule="(self.driver != \"etcd\" && has(self.basicAuth)) ? ((has(self.basicAuth.username.secretReference) || has(self.basicAuth.username.content))) : true", message="When driver is not etcd and basicAuth exists, username must have secretReference or content"
|
||||
// +kubebuilder:validation:XValidation:rule="(self.driver != \"etcd\" && has(self.basicAuth)) ? ((has(self.basicAuth.password.secretReference) || has(self.basicAuth.password.content))) : true", message="When driver is not etcd and basicAuth exists, password must have secretReference or content"
|
||||
// +kubebuilder:validation:XValidation:rule="(self.driver != \"etcd\") ? (has(self.tlsConfig) || has(self.basicAuth)) : true", message="When driver is not etcd, either tlsConfig or basicAuth must be provided"
|
||||
type DataStoreSpec struct {
|
||||
// The driver to use to connect to the shared datastore.
|
||||
Driver Driver `json:"driver"`
|
||||
@@ -33,7 +42,8 @@ type DataStoreSpec struct {
|
||||
// This value is optional.
|
||||
BasicAuth *BasicAuth `json:"basicAuth,omitempty"`
|
||||
// Defines the TLS/SSL configuration required to connect to the data store in a secure way.
|
||||
TLSConfig TLSConfig `json:"tlsConfig"`
|
||||
// This value is optional.
|
||||
TLSConfig *TLSConfig `json:"tlsConfig,omitempty"`
|
||||
}
|
||||
|
||||
// TLSConfig contains the information used to connect to the data store using a secured connection.
|
||||
@@ -42,7 +52,7 @@ type TLSConfig struct {
|
||||
// The key reference is required since etcd authentication is based on certificates, and Kamaji is responsible in creating this.
|
||||
CertificateAuthority CertKeyPair `json:"certificateAuthority"`
|
||||
// Specifies the SSL/TLS key and private key pair used to connect to the data store.
|
||||
ClientCertificate ClientCertificate `json:"clientCertificate"`
|
||||
ClientCertificate *ClientCertificate `json:"clientCertificate,omitempty"`
|
||||
}
|
||||
|
||||
type ClientCertificate struct {
|
||||
@@ -80,6 +90,9 @@ type SecretReference struct {
|
||||
|
||||
// DataStoreStatus defines the observed state of DataStore.
|
||||
type DataStoreStatus struct {
|
||||
// ObservedGeneration represents the .metadata.generation that was last reconciled.
|
||||
// +optional
|
||||
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
|
||||
// List of the Tenant Control Planes, namespaced named, using this data store.
|
||||
UsedBy []string `json:"usedBy,omitempty"`
|
||||
}
|
||||
@@ -89,6 +102,7 @@ type DataStoreStatus struct {
|
||||
//+kubebuilder:resource:scope=Cluster
|
||||
//+kubebuilder:printcolumn:name="Driver",type="string",JSONPath=".spec.driver",description="Kamaji data store driver"
|
||||
//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age"
|
||||
//+kubebuilder:metadata:annotations={"cert-manager.io/inject-ca-from=kamaji-system/kamaji-serving-cert"}
|
||||
|
||||
// DataStore is the Schema for the datastores API.
|
||||
type DataStore struct {
|
||||
|
||||
@@ -1,185 +0,0 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
//+kubebuilder:webhook:path=/mutate-kamaji-clastix-io-v1alpha1-datastore,mutating=true,failurePolicy=fail,sideEffects=None,groups=kamaji.clastix.io,resources=datastores,verbs=create;update,versions=v1alpha1,name=mdatastore.kb.io,admissionReviewVersions=v1
|
||||
//+kubebuilder:webhook:path=/validate-kamaji-clastix-io-v1alpha1-datastore,mutating=false,failurePolicy=fail,sideEffects=None,groups=kamaji.clastix.io,resources=datastores,verbs=create;update;delete,versions=v1alpha1,name=vdatastore.kb.io,admissionReviewVersions=v1
|
||||
|
||||
func (in *DataStore) SetupWebhookWithManager(mgr ctrl.Manager) error {
|
||||
secretValidator := &dataStoreSecretValidator{
|
||||
log: mgr.GetLogger().WithName("datastore-secret-webhook"),
|
||||
client: mgr.GetClient(),
|
||||
}
|
||||
|
||||
if err := ctrl.NewWebhookManagedBy(mgr).For(&corev1.Secret{}).WithValidator(secretValidator).Complete(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dsValidator := &dataStoreValidator{
|
||||
log: mgr.GetLogger().WithName("datastore-webhook"),
|
||||
client: mgr.GetClient(),
|
||||
}
|
||||
|
||||
return ctrl.NewWebhookManagedBy(mgr).
|
||||
For(in).
|
||||
WithValidator(dsValidator).
|
||||
WithDefaulter(dsValidator).
|
||||
Complete()
|
||||
}
|
||||
|
||||
type dataStoreValidator struct {
|
||||
log logr.Logger
|
||||
client client.Client
|
||||
}
|
||||
|
||||
func (d *dataStoreValidator) ValidateCreate(ctx context.Context, obj runtime.Object) error {
|
||||
ds, ok := obj.(*DataStore)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected *kamajiv1alpha1.DataStore")
|
||||
}
|
||||
|
||||
if err := d.validate(ctx, ds); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dataStoreValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error {
|
||||
old, ok := oldObj.(*DataStore)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected *kamajiv1alpha1.DataStore")
|
||||
}
|
||||
|
||||
ds, ok := newObj.(*DataStore)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected *kamajiv1alpha1.DataStore")
|
||||
}
|
||||
|
||||
d.log.Info("validate update", "name", ds.GetName())
|
||||
|
||||
if ds.Spec.Driver != old.Spec.Driver {
|
||||
return fmt.Errorf("driver of a DataStore cannot be changed")
|
||||
}
|
||||
|
||||
if err := d.validate(ctx, ds); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dataStoreValidator) ValidateDelete(ctx context.Context, obj runtime.Object) error {
|
||||
ds, ok := obj.(*DataStore)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected *kamajiv1alpha1.DataStore")
|
||||
}
|
||||
|
||||
tcpList := &TenantControlPlaneList{}
|
||||
|
||||
if err := d.client.List(ctx, tcpList, client.MatchingFieldsSelector{Selector: fields.OneTermEqualSelector(TenantControlPlaneUsedDataStoreKey, ds.GetName())}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(tcpList.Items) > 0 {
|
||||
return fmt.Errorf("the DataStore is used by multiple TenantControlPlanes and cannot be removed")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dataStoreValidator) Default(context.Context, runtime.Object) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dataStoreValidator) validate(ctx context.Context, ds *DataStore) error {
|
||||
if ds.Spec.BasicAuth != nil {
|
||||
if err := d.validateBasicAuth(ctx, ds); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := d.validateTLSConfig(ctx, ds); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dataStoreValidator) validateBasicAuth(ctx context.Context, ds *DataStore) error {
|
||||
if err := d.validateContentReference(ctx, ds.Spec.BasicAuth.Password); err != nil {
|
||||
return fmt.Errorf("basic-auth password is not valid, %w", err)
|
||||
}
|
||||
|
||||
if err := d.validateContentReference(ctx, ds.Spec.BasicAuth.Username); err != nil {
|
||||
return fmt.Errorf("basic-auth username is not valid, %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dataStoreValidator) validateTLSConfig(ctx context.Context, ds *DataStore) error {
|
||||
if err := d.validateContentReference(ctx, ds.Spec.TLSConfig.CertificateAuthority.Certificate); err != nil {
|
||||
return fmt.Errorf("CA certificate is not valid, %w", err)
|
||||
}
|
||||
|
||||
if ds.Spec.Driver == EtcdDriver {
|
||||
if ds.Spec.TLSConfig.CertificateAuthority.PrivateKey == nil {
|
||||
return fmt.Errorf("CA private key is required when using the etcd driver")
|
||||
}
|
||||
}
|
||||
|
||||
if ds.Spec.TLSConfig.CertificateAuthority.PrivateKey != nil {
|
||||
if err := d.validateContentReference(ctx, *ds.Spec.TLSConfig.CertificateAuthority.PrivateKey); err != nil {
|
||||
return fmt.Errorf("CA private key is not valid, %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := d.validateContentReference(ctx, ds.Spec.TLSConfig.ClientCertificate.Certificate); err != nil {
|
||||
return fmt.Errorf("client certificate is not valid, %w", err)
|
||||
}
|
||||
|
||||
if err := d.validateContentReference(ctx, ds.Spec.TLSConfig.ClientCertificate.PrivateKey); err != nil {
|
||||
return fmt.Errorf("client private key is not valid, %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dataStoreValidator) validateContentReference(ctx context.Context, ref ContentRef) error {
|
||||
switch {
|
||||
case len(ref.Content) > 0:
|
||||
return nil
|
||||
case ref.SecretRef == nil:
|
||||
return fmt.Errorf("the Secret reference is mandatory when bare content is not specified")
|
||||
case len(ref.SecretRef.SecretReference.Name) == 0:
|
||||
return fmt.Errorf("the Secret reference name is mandatory")
|
||||
case len(ref.SecretRef.SecretReference.Namespace) == 0:
|
||||
return fmt.Errorf("the Secret reference namespace is mandatory")
|
||||
}
|
||||
|
||||
if err := d.client.Get(ctx, types.NamespacedName{Name: ref.SecretRef.SecretReference.Name, Namespace: ref.SecretRef.SecretReference.Namespace}, &corev1.Secret{}); err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return fmt.Errorf("secret %s/%s is not found", ref.SecretRef.SecretReference.Namespace, ref.SecretRef.SecretReference.Name)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -4,7 +4,6 @@
|
||||
// Package v1alpha1 contains API Schema definitions for the kamaji v1alpha1 API group
|
||||
// +kubebuilder:object:generate=true
|
||||
// +groupName=kamaji.clastix.io
|
||||
//nolint
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
|
||||
@@ -43,20 +43,24 @@ func (d *DatastoreUsedSecret) ExtractValue() client.IndexerFunc {
|
||||
}
|
||||
}
|
||||
|
||||
if ds.Spec.TLSConfig.CertificateAuthority.Certificate.SecretRef != nil {
|
||||
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.CertificateAuthority.Certificate.SecretRef))
|
||||
}
|
||||
if ds.Spec.TLSConfig != nil {
|
||||
if ds.Spec.TLSConfig.CertificateAuthority.Certificate.SecretRef != nil {
|
||||
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.CertificateAuthority.Certificate.SecretRef))
|
||||
}
|
||||
|
||||
if ds.Spec.TLSConfig.CertificateAuthority.PrivateKey != nil && ds.Spec.TLSConfig.CertificateAuthority.PrivateKey.SecretRef != nil {
|
||||
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.CertificateAuthority.PrivateKey.SecretRef))
|
||||
}
|
||||
if ds.Spec.TLSConfig.CertificateAuthority.PrivateKey != nil && ds.Spec.TLSConfig.CertificateAuthority.PrivateKey.SecretRef != nil {
|
||||
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.CertificateAuthority.PrivateKey.SecretRef))
|
||||
}
|
||||
|
||||
if ds.Spec.TLSConfig.ClientCertificate.Certificate.SecretRef != nil {
|
||||
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.ClientCertificate.Certificate.SecretRef))
|
||||
}
|
||||
if ds.Spec.TLSConfig.ClientCertificate != nil {
|
||||
if ds.Spec.TLSConfig.ClientCertificate.Certificate.SecretRef != nil {
|
||||
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.ClientCertificate.Certificate.SecretRef))
|
||||
}
|
||||
|
||||
if ds.Spec.TLSConfig.ClientCertificate.PrivateKey.SecretRef != nil {
|
||||
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.ClientCertificate.PrivateKey.SecretRef))
|
||||
if ds.Spec.TLSConfig.ClientCertificate.PrivateKey.SecretRef != nil {
|
||||
res = append(res, d.namespacedName(*ds.Spec.TLSConfig.ClientCertificate.PrivateKey.SecretRef))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return res
|
||||
|
||||
api/v1alpha1/indexer_gateway_listener.go (new file)
@@ -0,0 +1,47 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
controllerruntime "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
GatewayListenerNameKey = "spec.listeners.name"
|
||||
)
|
||||
|
||||
type GatewayListener struct{}
|
||||
|
||||
func (g *GatewayListener) Object() client.Object {
|
||||
return &gatewayv1.Gateway{}
|
||||
}
|
||||
|
||||
func (g *GatewayListener) Field() string {
|
||||
return GatewayListenerNameKey
|
||||
}
|
||||
|
||||
func (g *GatewayListener) ExtractValue() client.IndexerFunc {
|
||||
return func(object client.Object) []string {
|
||||
gateway := object.(*gatewayv1.Gateway) //nolint:forcetypeassert
|
||||
|
||||
listenerNames := make([]string, 0, len(gateway.Spec.Listeners))
|
||||
for _, listener := range gateway.Spec.Listeners {
|
||||
// Create a composite key: namespace/gatewayName/listenerName
|
||||
// This allows us to look up gateways by listener name while ensuring uniqueness
|
||||
key := fmt.Sprintf("%s/%s/%s", gateway.Namespace, gateway.Name, listener.Name)
|
||||
listenerNames = append(listenerNames, key)
|
||||
}
|
||||
|
||||
return listenerNames
|
||||
}
|
||||
}
|
||||
|
||||
func (g *GatewayListener) SetupWithManager(ctx context.Context, mgr controllerruntime.Manager) error {
|
||||
return mgr.GetFieldIndexer().IndexField(ctx, g.Object(), g.Field(), g.ExtractValue())
|
||||
}
|
||||
api/v1alpha1/kubeconfiggenerator_types.go (new file)
@@ -0,0 +1,94 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
ManagedByLabel = "kamaji.clastix.io/managed-by"
|
||||
ManagedForLabel = "kamaji.clastix.io/managed-for"
|
||||
)
|
||||
|
||||
//+kubebuilder:object:root=true
|
||||
//+kubebuilder:subresource:status
|
||||
//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age"
|
||||
//+kubebuilder:metadata:annotations={"cert-manager.io/inject-ca-from=kamaji-system/kamaji-serving-cert"}
|
||||
//+kubebuilder:resource:scope=Cluster,shortName=kc,categories=kamaji
|
||||
|
||||
// KubeconfigGenerator is the Schema for the kubeconfiggenerators API.
|
||||
type KubeconfigGenerator struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec KubeconfigGeneratorSpec `json:"spec,omitempty"`
|
||||
Status KubeconfigGeneratorStatus `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// CompoundValue allows defining a static, or a dynamic value.
|
||||
// Options are mutually exclusive; exactly one must be set.
|
||||
// +kubebuilder:validation:XValidation:rule="(has(self.stringValue) || has(self.fromDefinition)) && !(has(self.stringValue) && has(self.fromDefinition))",message="Either stringValue or fromDefinition must be set, but not both."
|
||||
type CompoundValue struct {
|
||||
// StringValue is a static string value.
|
||||
StringValue string `json:"stringValue,omitempty"`
|
||||
// FromDefinition is used to generate a dynamic value,
|
||||
// it uses the dot notation to access fields from the referenced TenantControlPlane object:
|
||||
// e.g.: metadata.name
|
||||
FromDefinition string `json:"fromDefinition,omitempty"`
|
||||
}
|
||||
|
||||
type KubeconfigGeneratorSpec struct {
|
||||
// NamespaceSelector is used to filter Namespaces from which the generator should extract TenantControlPlane objects.
|
||||
NamespaceSelector metav1.LabelSelector `json:"namespaceSelector,omitempty"`
|
||||
// TenantControlPlaneSelector is used to filter the TenantControlPlane objects that should be addressed by the generator.
|
||||
TenantControlPlaneSelector metav1.LabelSelector `json:"tenantControlPlaneSelector,omitempty"`
|
||||
// Groups resolves to a set of strings used to populate the x509 Organisation field.
// These will be recognised by Kubernetes as user groups.
|
||||
Groups []CompoundValue `json:"groups,omitempty"`
|
||||
// User resolves to a string to identify the client, assigned to the x509 Common Name field.
|
||||
User CompoundValue `json:"user"`
|
||||
// ControlPlaneEndpointFrom is the key used to extract the Tenant Control Plane endpoint that must be used by the generator.
|
||||
// The targeted Secret is the `${TCP}-admin-kubeconfig` one, defaulting to `admin.svc`.
|
||||
//+kubebuilder:default="admin.svc"
|
||||
ControlPlaneEndpointFrom string `json:"controlPlaneEndpointFrom,omitempty"`
|
||||
}
|
||||
|
||||
type KubeconfigGeneratorStatusError struct {
|
||||
// Resource is the Namespaced name of the errored resource.
|
||||
//+kubebuilder:validation:Required
|
||||
Resource string `json:"resource"`
|
||||
// Message is the error message recorded upon the last generator run.
|
||||
//+kubebuilder:validation:Required
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// KubeconfigGeneratorStatus defines the observed state of KubeconfigGenerator.
|
||||
type KubeconfigGeneratorStatus struct {
|
||||
// ObservedGeneration represents the .metadata.generation that was last reconciled.
|
||||
// +optional
|
||||
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
|
||||
// Resources is the sum of targeted TenantControlPlane objects.
|
||||
//+kubebuilder:default=0
|
||||
Resources int `json:"resources"`
|
||||
// AvailableResources is the sum of successfully generated resources.
|
||||
// In case of a different value compared to Resources, check the field errors.
|
||||
//+kubebuilder:default=0
|
||||
AvailableResources int `json:"availableResources"`
|
||||
// Errors is the list of failed kubeconfig generations.
|
||||
Errors []KubeconfigGeneratorStatusError `json:"errors,omitempty"`
|
||||
}
|
||||
|
||||
//+kubebuilder:object:root=true
|
||||
|
||||
// KubeconfigGeneratorList contains a list of TenantControlPlane.
|
||||
type KubeconfigGeneratorList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata,omitempty"`
|
||||
Items []KubeconfigGenerator `json:"items"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
SchemeBuilder.Register(&KubeconfigGenerator{}, &KubeconfigGeneratorList{})
|
||||
}
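
For context, a `KubeconfigGenerator` declared against this schema could look like the following sketch. The selectors and label values are purely illustrative; only the field names come from the spec above.

```yaml
apiVersion: kamaji.clastix.io/v1alpha1
kind: KubeconfigGenerator
metadata:
  name: tenant-admins                   # cluster-scoped, short name: kc
spec:
  namespaceSelector:
    matchLabels:
      environment: production           # illustrative label
  tenantControlPlaneSelector:
    matchLabels:
      tier: standard                    # illustrative label
  user:
    fromDefinition: metadata.name       # dynamic value, dot notation on the TenantControlPlane
  groups:
    - stringValue: system:masters       # static value; stringValue and fromDefinition are mutually exclusive
  controlPlaneEndpointFrom: admin.svc   # key looked up in the ${TCP}-admin-kubeconfig Secret
```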
|
||||
api/v1alpha1/suite_test.go (new file)
@@ -0,0 +1,55 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/rest"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
)
|
||||
|
||||
var (
|
||||
cfg *rest.Config
|
||||
k8sClient client.Client
|
||||
testEnv *envtest.Environment
|
||||
)
|
||||
|
||||
func TestAPIs(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "TenantControlPlane Suite")
|
||||
}
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
By("bootstrapping test environment")
|
||||
testEnv = &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{
|
||||
filepath.Join("..", "..", "charts", "kamaji", "crds"),
|
||||
// filepath.Join("../..", "chart", "kamaji", "crds"),
|
||||
},
|
||||
}
|
||||
|
||||
var err error
|
||||
cfg, err = testEnv.Start()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(cfg).ToNot(BeNil())
|
||||
|
||||
err = AddToScheme(scheme.Scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(k8sClient).ToNot(BeNil())
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
By("tearing down the test environment")
|
||||
err := testEnv.Stop()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
api/v1alpha1/tenantcontrolplane_const.go (new file)
@@ -0,0 +1,10 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
const (
|
||||
// PausedReconciliationAnnotation is an annotation that can be applied to
|
||||
// Tenant Control Plane objects to prevent the controller from processing such a resource.
|
||||
PausedReconciliationAnnotation = "kamaji.clastix.io/paused"
|
||||
)
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
@@ -26,12 +26,12 @@ func (in *TenantControlPlane) AssignedControlPlaneAddress() (string, int32, erro
|
||||
|
||||
address, portString, err := net.SplitHostPort(in.Status.ControlPlaneEndpoint)
|
||||
if err != nil {
|
||||
return "", 0, errors.Wrap(err, "cannot split host port from Tenant Control Plane endpoint")
|
||||
return "", 0, fmt.Errorf("cannot split host port from Tenant Control Plane endpoint: %w", err)
|
||||
}
|
||||
|
||||
port, err := strconv.Atoi(portString)
|
||||
if err != nil {
|
||||
return "", 0, errors.Wrap(err, "cannot convert Tenant Control Plane port from endpoint")
|
||||
return "", 0, fmt.Errorf("cannot convert Tenant Control Plane port from endpoint: %w", err)
|
||||
}
|
||||
|
||||
return address, int32(port), nil
|
||||
@@ -46,7 +46,7 @@ func (in *TenantControlPlane) DeclaredControlPlaneAddress(ctx context.Context, c
|
||||
svc := &corev1.Service{}
|
||||
err := client.Get(ctx, types.NamespacedName{Namespace: in.GetNamespace(), Name: in.GetName()}, svc)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "cannot retrieve Service for the TenantControlPlane")
|
||||
return "", fmt.Errorf("cannot retrieve Service for the TenantControlPlane: %w", err)
|
||||
}
|
||||
|
||||
switch {
|
||||
@@ -61,12 +61,51 @@ func (in *TenantControlPlane) DeclaredControlPlaneAddress(ctx context.Context, c
|
||||
return "", kamajierrors.NonExposedLoadBalancerError{}
|
||||
}
|
||||
|
||||
for _, lb := range loadBalancerStatus.Ingress {
|
||||
if ip := lb.IP; len(ip) > 0 {
|
||||
return ip, nil
|
||||
}
|
||||
return getLoadBalancerAddress(loadBalancerStatus.Ingress)
|
||||
}
|
||||
|
||||
return "", kamajierrors.MissingValidIPError{}
|
||||
}
|
||||
|
||||
// getLoadBalancerAddress extracts the IP address from LoadBalancer ingress.
|
||||
// It also checks and rejects hostname usage for LoadBalancer ingress.
|
||||
//
|
||||
// Reasons for not supporting hostnames:
|
||||
// - DNS resolution can differ across environments, leading to inconsistent behavior.
|
||||
// - It may cause connectivity problems between Kubernetes components.
|
||||
// - The DNS resolution could change over time, potentially breaking cluster-to-API-server connections.
|
||||
//
|
||||
// Recommended solutions:
|
||||
// - Use a static IP address to ensure stable and predictable communication within the cluster.
|
||||
// - If a hostname is necessary, consider setting up a Virtual IP (VIP) for the given hostname.
|
||||
// - Alternatively, use an external load balancer that can provide a stable IP address.
|
||||
//
|
||||
// Note: Implementing L7 routing with the API Server requires a deep understanding of the implications.
|
||||
// Users should be aware of the complexities involved, including potential issues with TLS passthrough
|
||||
// for client-based certificate authentication in Ingress expositions.
|
||||
func getLoadBalancerAddress(ingress []corev1.LoadBalancerIngress) (string, error) {
|
||||
for _, lb := range ingress {
|
||||
if ip := lb.IP; len(ip) > 0 {
|
||||
return ip, nil
|
||||
}
|
||||
if hostname := lb.Hostname; len(hostname) > 0 {
|
||||
return "", fmt.Errorf("hostname not supported for LoadBalancer ingress: use static IP instead")
|
||||
}
|
||||
}
|
||||
|
||||
return "", kamajierrors.MissingValidIPError{}
|
||||
}
|
||||
|
||||
func (in *TenantControlPlane) normalizeNamespaceName() string {
|
||||
// The dash character (-) must be replaced with an underscore, PostgreSQL is complaining about it:
|
||||
// https://github.com/clastix/kamaji/issues/328
|
||||
return strings.ReplaceAll(fmt.Sprintf("%s_%s", in.GetNamespace(), in.GetName()), "-", "_")
|
||||
}
|
||||
|
||||
func (in *TenantControlPlane) GetDefaultDatastoreUsername() string {
|
||||
return in.normalizeNamespaceName()
|
||||
}
|
||||
|
||||
func (in *TenantControlPlane) GetDefaultDatastoreSchema() string {
|
||||
return in.normalizeNamespaceName()
|
||||
}
|
||||
|
||||
@@ -8,5 +8,5 @@ package v1alpha1
|
||||
// +kubebuilder:object:generate=false
|
||||
type KubeadmConfigChecksumDependant interface {
|
||||
GetChecksum() string
|
||||
SetChecksum(string)
|
||||
SetChecksum(checksum string)
|
||||
}
|
||||
|
||||
api/v1alpha1/tenantcontrolplane_jsonpatch.go (new file)
@@ -0,0 +1,83 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
)
|
||||
|
||||
type JSONPatches []JSONPatch
|
||||
|
||||
type JSONPatch struct {
|
||||
// Op is the RFC 6902 JSON Patch operation.
|
||||
//+kubebuilder:validation:Enum=add;remove;replace;move;copy;test
|
||||
Op string `json:"op"`
|
||||
// Path specifies the target location in the JSON document. Use "/" to separate keys; "-" for appending to arrays.
|
||||
Path string `json:"path"`
|
||||
// From specifies the source location for move or copy operations.
|
||||
From string `json:"from,omitempty"`
|
||||
// Value is the operation value to be used when Op is add, replace, test.
|
||||
Value *apiextensionsv1.JSON `json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (p JSONPatches) ToJSON() ([]byte, error) {
|
||||
if len(p) == 0 {
|
||||
return []byte("[]"), nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 256)
|
||||
buf = append(buf, '[')
|
||||
|
||||
for i, patch := range p {
|
||||
if i > 0 {
|
||||
buf = append(buf, ',')
|
||||
}
|
||||
|
||||
buf = append(buf, '{')
|
||||
|
||||
buf = append(buf, `"op":"`...)
|
||||
buf = appendEscapedString(buf, patch.Op)
|
||||
buf = append(buf, '"')
|
||||
|
||||
buf = append(buf, `,"path":"`...)
|
||||
buf = appendEscapedString(buf, patch.Path)
|
||||
buf = append(buf, '"')
|
||||
|
||||
if patch.From != "" {
|
||||
buf = append(buf, `,"from":"`...)
|
||||
buf = appendEscapedString(buf, patch.From)
|
||||
buf = append(buf, '"')
|
||||
}
|
||||
|
||||
if patch.Value != nil {
|
||||
buf = append(buf, `,"value":`...)
|
||||
buf = append(buf, patch.Value.Raw...)
|
||||
}
|
||||
|
||||
buf = append(buf, '}')
|
||||
}
|
||||
|
||||
buf = append(buf, ']')
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
func appendEscapedString(dst []byte, s string) []byte {
|
||||
for i := range s {
|
||||
switch s[i] {
|
||||
case '\\', '"':
|
||||
dst = append(dst, '\\', s[i])
|
||||
case '\n':
|
||||
dst = append(dst, '\\', 'n')
|
||||
case '\r':
|
||||
dst = append(dst, '\\', 'r')
|
||||
case '\t':
|
||||
dst = append(dst, '\\', 't')
|
||||
default:
|
||||
dst = append(dst, s[i])
|
||||
}
|
||||
}
|
||||
|
||||
return dst
|
||||
}
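
A quick sketch of what `JSONPatches` represents and how `ToJSON` serializes it. The YAML shows RFC 6902 operations as they would be declared in a manifest; the exact spec field carrying them is not part of this hunk and is assumed.

```yaml
# Illustrative RFC 6902 operations (the enclosing field name, e.g. jsonPatches, is assumed):
- op: add
  path: /spec/template/spec/containers/0/args/-
  value: "--v=4"
- op: replace
  path: /spec/replicas
  value: 3
# JSONPatches.ToJSON() emits keys in the order op, path, from (if set), value:
# [{"op":"add","path":"/spec/template/spec/containers/0/args/-","value":"--v=4"},{"op":"replace","path":"/spec/replicas","value":3}]
```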
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func (in KubeadmPhaseStatus) GetChecksum() string {
|
||||
func (in *KubeadmPhaseStatus) GetChecksum() string {
|
||||
return in.Checksum
|
||||
}
|
||||
|
||||
|
||||
api/v1alpha1/tenantcontrolplane_registrysettings.go (new file)
@@ -0,0 +1,18 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
type RegistrySettings struct {
|
||||
//+kubebuilder:default="registry.k8s.io"
|
||||
Registry string `json:"registry,omitempty"`
|
||||
// The tag to append to all the Control Plane container images.
|
||||
// Optional.
|
||||
TagSuffix string `json:"tagSuffix,omitempty"`
|
||||
//+kubebuilder:default="kube-apiserver"
|
||||
APIServerImage string `json:"apiServerImage,omitempty"`
|
||||
//+kubebuilder:default="kube-controller-manager"
|
||||
ControllerManagerImage string `json:"controllerManagerImage,omitempty"`
|
||||
//+kubebuilder:default="kube-scheduler"
|
||||
SchedulerImage string `json:"schedulerImage,omitempty"`
|
||||
}
|
||||
30
api/v1alpha1/tenantcontrolplane_registrysettings_funcs.go
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func (r *RegistrySettings) buildContainerImage(name, tag string) string {
|
||||
image := fmt.Sprintf("%s/%s:%s", r.Registry, name, tag)
|
||||
|
||||
if len(r.TagSuffix) > 0 {
|
||||
image += r.TagSuffix
|
||||
}
|
||||
|
||||
return image
|
||||
}
|
||||
|
||||
func (r *RegistrySettings) KubeAPIServerImage(version string) string {
|
||||
return r.buildContainerImage(r.APIServerImage, version)
|
||||
}
|
||||
|
||||
func (r *RegistrySettings) KubeSchedulerImage(version string) string {
|
||||
return r.buildContainerImage(r.SchedulerImage, version)
|
||||
}
|
||||
|
||||
func (r *RegistrySettings) KubeControllerManagerImage(version string) string {
|
||||
return r.buildContainerImage(r.ControllerManagerImage, version)
|
||||
}
|
||||
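A hedged sketch of how these helpers compose an image reference; the registry and suffix values below are invented for illustration, and the fragment assumes the `v1alpha1` package scope plus an `fmt` import:

```go
rs := RegistrySettings{
	Registry:               "registry.internal.example", // hypothetical mirror
	APIServerImage:         "kube-apiserver",
	ControllerManagerImage: "kube-controller-manager",
	SchedulerImage:         "kube-scheduler",
	TagSuffix:              "-fips", // hypothetical suffix, appended verbatim after the tag
}

fmt.Println(rs.KubeAPIServerImage("v1.29.0"))
// registry.internal.example/kube-apiserver:v1.29.0-fips
```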
@@ -8,6 +8,7 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
|
||||
)
|
||||
|
||||
// APIServerCertificatesStatus defines the observed state of ETCD Certificate for API server.
|
||||
@@ -122,6 +123,12 @@ type ExternalKubernetesObjectStatus struct {
|
||||
LastUpdate metav1.Time `json:"lastUpdate,omitempty"`
|
||||
}
|
||||
|
||||
type KonnectivityAgentStatus struct {
|
||||
ExternalKubernetesObjectStatus `json:",inline"`
|
||||
|
||||
Mode KonnectivityAgentMode `json:"mode,omitempty"`
|
||||
}
|
||||
|
||||
// KonnectivityStatus defines the status of Konnectivity as Addon.
|
||||
type KonnectivityStatus struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
@@ -130,8 +137,9 @@ type KonnectivityStatus struct {
|
||||
Kubeconfig KubeconfigStatus `json:"kubeconfig,omitempty"`
|
||||
ServiceAccount ExternalKubernetesObjectStatus `json:"sa,omitempty"`
|
||||
ClusterRoleBinding ExternalKubernetesObjectStatus `json:"clusterrolebinding,omitempty"`
|
||||
Agent ExternalKubernetesObjectStatus `json:"agent,omitempty"`
|
||||
Agent KonnectivityAgentStatus `json:"agent,omitempty"`
|
||||
Service KubernetesServiceStatus `json:"service,omitempty"`
|
||||
Gateway *KubernetesGatewayStatus `json:"gateway,omitempty"`
|
||||
}
|
||||
|
||||
type KonnectivityConfigMap struct {
|
||||
@@ -154,6 +162,9 @@ type AddonsStatus struct {
|
||||
|
||||
// TenantControlPlaneStatus defines the observed state of TenantControlPlane.
|
||||
type TenantControlPlaneStatus struct {
|
||||
// ObservedGeneration represents the .metadata.generation that was last reconciled.
|
||||
// +optional
|
||||
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
|
||||
// Storage Status contains information about Kubernetes storage system
|
||||
Storage StorageStatus `json:"storage,omitempty"`
|
||||
// Certificates contains information about the different certificates
|
||||
@@ -181,13 +192,17 @@ type KubernetesStatus struct {
|
||||
Deployment KubernetesDeploymentStatus `json:"deployment,omitempty"`
|
||||
Service KubernetesServiceStatus `json:"service,omitempty"`
|
||||
Ingress *KubernetesIngressStatus `json:"ingress,omitempty"`
|
||||
Gateway *KubernetesGatewayStatus `json:"gateway,omitempty"`
|
||||
}
|
||||
|
||||
// +kubebuilder:validation:Enum=Provisioning;CertificateAuthorityRotating;Upgrading;Migrating;Ready;NotReady
|
||||
// +kubebuilder:validation:Enum=Unknown;Provisioning;CertificateAuthorityRotating;Upgrading;Migrating;Ready;NotReady;Sleeping;WriteLimited
|
||||
type KubernetesVersionStatus string
|
||||
|
||||
var (
|
||||
VersionUnknown KubernetesVersionStatus = "Unknown"
|
||||
VersionProvisioning KubernetesVersionStatus = "Provisioning"
|
||||
VersionSleeping KubernetesVersionStatus = "Sleeping"
|
||||
VersionWriteLimited KubernetesVersionStatus = "WriteLimited"
|
||||
VersionCARotating KubernetesVersionStatus = "CertificateAuthorityRotating"
|
||||
VersionUpgrading KubernetesVersionStatus = "Upgrading"
|
||||
VersionMigrating KubernetesVersionStatus = "Migrating"
|
||||
@@ -198,7 +213,7 @@ var (
|
||||
type KubernetesVersion struct {
|
||||
// Version is the running Kubernetes version of the Tenant Control Plane.
|
||||
Version string `json:"version,omitempty"`
|
||||
// +kubebuilder:default=Provisioning
|
||||
//+kubebuilder:default=Provisioning
|
||||
// Status returns the current status of the Kubernetes version, such as its provisioning state, or completed upgrade.
|
||||
Status *KubernetesVersionStatus `json:"status,omitempty"`
|
||||
}
|
||||
@@ -235,3 +250,25 @@ type KubernetesIngressStatus struct {
|
||||
// The namespace which the Ingress for the given cluster is deployed.
|
||||
Namespace string `json:"namespace"`
|
||||
}
|
||||
|
||||
type GatewayAccessPoint struct {
|
||||
Type *gatewayv1.AddressType `json:"type"`
|
||||
Value string `json:"value"`
|
||||
Port int32 `json:"port"`
|
||||
URLs []string `json:"urls,omitempty"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen=false
|
||||
type RouteStatus = gatewayv1.RouteStatus
|
||||
|
||||
// KubernetesGatewayStatus defines the status for the Tenant Control Plane Gateway in the management cluster.
|
||||
type KubernetesGatewayStatus struct {
|
||||
// The TLSRoute status as reported by the gateway controllers.
|
||||
RouteStatus `json:",inline"`
|
||||
|
||||
// Reference to the route created for this tenant.
|
||||
RouteRef corev1.LocalObjectReference `json:"routeRef,omitempty"`
|
||||
|
||||
// A list of valid access points that the route exposes.
|
||||
AccessPoints []GatewayAccessPoint `json:"accessPoints,omitempty"`
|
||||
}
|
||||
|
||||
@@ -7,29 +7,52 @@ import (
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
|
||||
)
|
||||
|
||||
// NetworkProfileSpec defines the desired state of NetworkProfile.
|
||||
type NetworkProfileSpec struct {
|
||||
// LoadBalancerSourceRanges restricts the IP ranges that can access
|
||||
// the LoadBalancer type Service. This field defines a list of IP
|
||||
// address ranges (in CIDR format) that are allowed to access the service.
|
||||
// If left empty, the service will allow traffic from all IP ranges (0.0.0.0/0).
|
||||
// This feature is useful for restricting access to API servers or services
|
||||
// to specific networks for security purposes.
|
||||
// Example: {"192.168.1.0/24", "10.0.0.0/8"}
|
||||
LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"`
|
||||
// Specify the LoadBalancer class in case of multiple load balancer implementations.
|
||||
// Field supported only for Tenant Control Plane instances exposed using a LoadBalancer Service.
|
||||
// +kubebuilder:validation:MinLength=1
|
||||
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="LoadBalancerClass is immutable"
|
||||
LoadBalancerClass *string `json:"loadBalancerClass,omitempty"`
|
||||
// Address where the API Server will be exposed.
|
||||
// In case of LoadBalancer Service, this can be empty in order to use the exposed IP provided by the cloud controller manager.
|
||||
Address string `json:"address,omitempty"`
|
||||
// The default domain name used for DNS resolution within the cluster.
|
||||
//+kubebuilder:default="cluster.local"
|
||||
//+kubebuilder:validation:XValidation:rule="self == oldSelf",message="changing the cluster domain is not supported"
|
||||
//+kubebuilder:validation:Pattern=.*\..*
|
||||
ClusterDomain string `json:"clusterDomain,omitempty"`
|
||||
// AllowAddressAsExternalIP will include tenantControlPlane.Spec.NetworkProfile.Address in the section of
|
||||
// ExternalIPs of the Kubernetes Service (only ClusterIP or NodePort)
|
||||
AllowAddressAsExternalIP bool `json:"allowAddressAsExternalIP,omitempty"`
|
||||
// Port where the API Server will be exposed
|
||||
// +kubebuilder:default=6443
|
||||
//+kubebuilder:default=6443
|
||||
Port int32 `json:"port,omitempty"`
|
||||
// CertSANs sets extra Subject Alternative Names (SANs) for the API Server signing certificate.
|
||||
// Use this field to add additional hostnames when exposing the Tenant Control Plane with third solutions.
|
||||
CertSANs []string `json:"certSANs,omitempty"`
|
||||
// Kubernetes Service
|
||||
// +kubebuilder:default="10.96.0.0/16"
|
||||
// CIDR for Kubernetes Services: if empty, defaulted to 10.96.0.0/16.
|
||||
//+kubebuilder:default="10.96.0.0/16"
|
||||
ServiceCIDR string `json:"serviceCidr,omitempty"`
|
||||
// CIDR for Kubernetes Pods
|
||||
// +kubebuilder:default="10.244.0.0/16"
|
||||
// CIDR for Kubernetes Pods: if empty, defaulted to 10.244.0.0/16.
|
||||
//+kubebuilder:default="10.244.0.0/16"
|
||||
PodCIDR string `json:"podCidr,omitempty"`
|
||||
// +kubebuilder:default={"10.96.0.10"}
|
||||
// The DNS Service for internal resolution, it must match the Service CIDR.
|
||||
// In case of an empty value, it is automatically computed according to the Service CIDR, e.g.:
|
||||
// Service CIDR 10.96.0.0/16, the resulting DNS Service IP will be 10.96.0.10 for IPv4,
|
||||
// for IPv6 from the CIDR 2001:db8:abcd::/64 the resulting DNS Service IP will be 2001:db8:abcd::10.
|
||||
DNSServiceIPs []string `json:"dnsServiceIPs,omitempty"`
|
||||
}
|
||||
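The DNS Service IP derivation described above (the tenth address of the Service CIDR) can be sketched as follows; this illustrates the stated convention and is not the controller's actual code:

```go
package main

import (
	"fmt"
	"math/big"
	"net"
)

// dnsServiceIP returns the tenth address of the given Service CIDR,
// matching the convention described in the DNSServiceIPs comment.
func dnsServiceIP(serviceCIDR string) (string, error) {
	_, ipNet, err := net.ParseCIDR(serviceCIDR)
	if err != nil {
		return "", err
	}
	n := new(big.Int).SetBytes(ipNet.IP)
	n.Add(n, big.NewInt(10))
	ip := make(net.IP, len(ipNet.IP))
	n.FillBytes(ip)
	return ip.String(), nil
}

func main() {
	ip, _ := dnsServiceIP("10.96.0.0/16")
	fmt.Println(ip) // 10.96.0.10
}
```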
|
||||
@@ -45,13 +68,22 @@ const (
|
||||
)
|
||||
|
||||
type KubeletSpec struct {
|
||||
// ConfigurationJSONPatches contains the RFC 6902 JSON patches to customise the kubeadm-generated configuration,
// useful for customising and mangling the configuration according to your needs;
|
||||
// e.g.: configuring the cgroup driver used by Kubelet is possible via the following patch:
|
||||
//
|
||||
// [{"op": "replace", "path": "/cgroupDriver", "value": "systemd"}]
|
||||
ConfigurationJSONPatches JSONPatches `json:"configurationJSONPatches,omitempty"`
|
||||
// Ordered list of the preferred NodeAddressTypes to use for kubelet connections.
|
||||
// Default to Hostname, InternalIP, ExternalIP.
|
||||
// +kubebuilder:default={"Hostname","InternalIP","ExternalIP"}
|
||||
// +kubebuilder:validation:MinItems=1
|
||||
// Defaults to InternalIP, ExternalIP, Hostname.
|
||||
//+kubebuilder:default={"InternalIP","ExternalIP","Hostname"}
|
||||
//+kubebuilder:validation:MinItems=1
|
||||
//+listType=set
|
||||
PreferredAddressTypes []KubeletPreferredAddressType `json:"preferredAddressTypes,omitempty"`
|
||||
// CGroupFS defines the cgroup driver for Kubelet
|
||||
// CGroupFS defines the cgroup driver for Kubelet
|
||||
// https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/
|
||||
//
|
||||
// Deprecated: use ConfigurationJSONPatches.
|
||||
CGroupFS CGroupDriver `json:"cgroupfs,omitempty"`
|
||||
}
|
||||
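For instance, the deprecated `CGroupFS` knob maps onto a single `ConfigurationJSONPatches` entry; the fragment below (package scope and the `apiextensionsv1` import are assumed) shows the equivalent spec:

```go
kubelet := KubeletSpec{
	ConfigurationJSONPatches: JSONPatches{
		{Op: "replace", Path: "/cgroupDriver", Value: &apiextensionsv1.JSON{Raw: []byte(`"systemd"`)}},
	},
	PreferredAddressTypes: []KubeletPreferredAddressType{"InternalIP", "ExternalIP", "Hostname"},
}
_ = kubelet
```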
|
||||
@@ -63,10 +95,36 @@ type KubernetesSpec struct {
|
||||
|
||||
// List of enabled Admission Controllers for the Tenant cluster.
|
||||
// Full reference available here: https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers
|
||||
// +kubebuilder:default=CertificateApproval;CertificateSigning;CertificateSubjectRestriction;DefaultIngressClass;DefaultStorageClass;DefaultTolerationSeconds;LimitRanger;MutatingAdmissionWebhook;NamespaceLifecycle;PersistentVolumeClaimResize;Priority;ResourceQuota;RuntimeClass;ServiceAccount;StorageObjectInUseProtection;TaintNodesByCondition;ValidatingAdmissionWebhook
|
||||
//+kubebuilder:default=CertificateApproval;CertificateSigning;CertificateSubjectRestriction;DefaultIngressClass;DefaultStorageClass;DefaultTolerationSeconds;LimitRanger;MutatingAdmissionWebhook;NamespaceLifecycle;PersistentVolumeClaimResize;Priority;ResourceQuota;RuntimeClass;ServiceAccount;StorageObjectInUseProtection;TaintNodesByCondition;ValidatingAdmissionWebhook
|
||||
AdmissionControllers AdmissionControllers `json:"admissionControllers,omitempty"`
|
||||
}
|
||||
|
||||
type AdditionalPort struct {
|
||||
// The name of this port within the Service created by Kamaji.
|
||||
// This must be a DNS_LABEL, must be unique within the Service, and cannot be `kube-apiserver` or `konnectivity-server`.
|
||||
Name string `json:"name"`
|
||||
// The IP protocol for this port. Supports "TCP", "UDP", and "SCTP".
|
||||
//+kubebuilder:validation:Enum=TCP;UDP;SCTP
|
||||
//+kubebuilder:default=TCP
|
||||
Protocol corev1.Protocol `json:"protocol,omitempty"`
|
||||
// The application protocol for this port.
|
||||
// This is used as a hint for implementations to offer richer behavior for protocols that they understand.
|
||||
// This field follows standard Kubernetes label syntax.
|
||||
// Valid values are either:
|
||||
//
|
||||
// * Un-prefixed protocol names - reserved for IANA standard service names (as per
|
||||
// RFC-6335 and https://www.iana.org/assignments/service-names).
|
||||
AppProtocol *string `json:"appProtocol,omitempty"`
|
||||
// The port that will be exposed by this service.
|
||||
Port int32 `json:"port"`
|
||||
// Number or name of the port to access on the pods of the Tenant Control Plane.
|
||||
// Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
|
||||
// If this is a string, it will be looked up as a named port in the
|
||||
// target Pod's container ports. If this is not specified, the value
|
||||
// of the 'port' field is used (an identity map).
|
||||
TargetPort intstr.IntOrString `json:"targetPort"`
|
||||
}
|
||||
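A small, hypothetical fragment (package scope plus the `corev1` and `intstr` imports above) showing an extra metrics port forwarded to a named container port on the control-plane pods:

```go
extra := AdditionalPort{
	Name:       "metrics", // must not clash with kube-apiserver or konnectivity-server
	Protocol:   corev1.ProtocolTCP,
	Port:       9090,
	TargetPort: intstr.FromString("metrics"),
}
_ = extra
```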
|
||||
// AdditionalMetadata defines which additional metadata, such as labels and annotations, must be attached to the created resource.
|
||||
type AdditionalMetadata struct {
|
||||
Labels map[string]string `json:"labels,omitempty"`
|
||||
@@ -75,6 +133,7 @@ type AdditionalMetadata struct {
|
||||
|
||||
// ControlPlane defines how the Tenant Control Plane Kubernetes resources must be created in the Admin Cluster,
|
||||
// such as the number of Pod replicas, the Service resource, or the Ingress.
|
||||
// +kubebuilder:validation:XValidation:rule="!(has(self.ingress) && has(self.gateway))",message="using both ingress and gateway is not supported"
|
||||
type ControlPlane struct {
|
||||
// Defining the options for the deployed Tenant Control Plane as Deployment resource.
|
||||
Deployment DeploymentSpec `json:"deployment,omitempty"`
|
||||
@@ -82,6 +141,8 @@ type ControlPlane struct {
|
||||
Service ServiceSpec `json:"service"`
|
||||
// Defining the options for an Optional Ingress which will expose API Server of the Tenant Control Plane
|
||||
Ingress *IngressSpec `json:"ingress,omitempty"`
|
||||
// Defining the options for an Optional Gateway which will expose API Server of the Tenant Control Plane
|
||||
Gateway *GatewaySpec `json:"gateway,omitempty"`
|
||||
}
|
||||
|
||||
// IngressSpec defines the options for the ingress which will expose API Server of the Tenant Control Plane.
|
||||
@@ -93,27 +154,33 @@ type IngressSpec struct {
|
||||
Hostname string `json:"hostname,omitempty"`
|
||||
}
|
||||
|
||||
// ComponentResourceRequirements describes the compute resource requirements.
|
||||
type ComponentResourceRequirements struct {
|
||||
// Limits describes the maximum amount of compute resources allowed.
|
||||
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
||||
Limits corev1.ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"`
|
||||
// Requests describes the minimum amount of compute resources required.
|
||||
// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
|
||||
// otherwise to an implementation-defined value.
|
||||
// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
||||
Requests corev1.ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"`
|
||||
// GatewaySpec defines the options for the Gateway which will expose API Server of the Tenant Control Plane.
|
||||
// +kubebuilder:validation:XValidation:rule="!has(self.parentRefs) || size(self.parentRefs) == 0 || self.parentRefs.all(ref, !has(ref.port) && !has(ref.sectionName))",message="parentRefs must not specify port or sectionName, these are set automatically by Kamaji"
|
||||
type GatewaySpec struct {
|
||||
// AdditionalMetadata to add Labels and Annotations support.
|
||||
AdditionalMetadata AdditionalMetadata `json:"additionalMetadata,omitempty"`
|
||||
// GatewayParentRefs references the parent Gateway resources the tenant route should attach to.
|
||||
GatewayParentRefs []gatewayv1.ParentReference `json:"parentRefs,omitempty"`
|
||||
// Hostname is an optional field which will be used as a route hostname.
|
||||
Hostname gatewayv1.Hostname `json:"hostname,omitempty"`
|
||||
}
|
||||
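A sketch of a conforming GatewaySpec; the names are invented, and per the CEL rule above `port` and `sectionName` are left for Kamaji to set:

```go
gwNamespace := gatewayv1.Namespace("infra-gateways") // hypothetical namespace
gw := GatewaySpec{
	GatewayParentRefs: []gatewayv1.ParentReference{{
		Name:      gatewayv1.ObjectName("shared-gateway"), // hypothetical Gateway name
		Namespace: &gwNamespace,
	}},
	Hostname: gatewayv1.Hostname("tenant-00.example.com"),
}
_ = gw
```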
|
||||
type ControlPlaneComponentsResources struct {
|
||||
APIServer *ComponentResourceRequirements `json:"apiServer,omitempty"`
|
||||
ControllerManager *ComponentResourceRequirements `json:"controllerManager,omitempty"`
|
||||
Scheduler *ComponentResourceRequirements `json:"scheduler,omitempty"`
|
||||
APIServer *corev1.ResourceRequirements `json:"apiServer,omitempty"`
|
||||
ControllerManager *corev1.ResourceRequirements `json:"controllerManager,omitempty"`
|
||||
Scheduler *corev1.ResourceRequirements `json:"scheduler,omitempty"`
|
||||
// Define the kine container resources.
|
||||
// Available only if Kamaji is running using Kine as backing storage.
|
||||
Kine *corev1.ResourceRequirements `json:"kine,omitempty"`
|
||||
}
|
||||
|
||||
type DeploymentSpec struct {
|
||||
// +kubebuilder:default=2
|
||||
Replicas int32 `json:"replicas,omitempty"`
|
||||
// RegistrySettings allows to override the default images for the given Tenant Control Plane instance.
|
||||
// It could be used to point to a different container registry rather than the public one.
|
||||
//+kubebuilder:default={registry:"registry.k8s.io",apiServerImage:"kube-apiserver",controllerManagerImage:"kube-controller-manager",schedulerImage:"kube-scheduler"}
|
||||
RegistrySettings RegistrySettings `json:"registrySettings,omitempty"`
|
||||
//+kubebuilder:default=2
|
||||
Replicas *int32 `json:"replicas,omitempty"`
|
||||
// NodeSelector is a selector which must be true for the pod to fit on a node.
|
||||
// Selector which must match a node's labels for the pod to be scheduled on that node.
|
||||
// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
@@ -126,7 +193,7 @@ type DeploymentSpec struct {
|
||||
RuntimeClassName string `json:"runtimeClassName,omitempty"`
|
||||
// Strategy describes how to replace existing pods with new ones for the given Tenant Control Plane.
|
||||
// Default value is set to Rolling Update, with a blue/green strategy.
|
||||
// +kubebuilder:default={type:"RollingUpdate",rollingUpdate:{maxUnavailable:0,maxSurge:"100%"}}
|
||||
//+kubebuilder:default={type:"RollingUpdate",rollingUpdate:{maxUnavailable:0,maxSurge:"100%"}}
|
||||
Strategy appsv1.DeploymentStrategy `json:"strategy,omitempty"`
|
||||
// If specified, the Tenant Control Plane pod's tolerations.
|
||||
// More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
|
||||
@@ -143,9 +210,31 @@ type DeploymentSpec struct {
|
||||
// (kube-apiserver, controller-manager, and scheduler).
|
||||
Resources *ControlPlaneComponentsResources `json:"resources,omitempty"`
|
||||
// ExtraArgs allows adding additional arguments to the Control Plane components,
|
||||
// such as kube-apiserver, controller-manager, and scheduler.
|
||||
ExtraArgs *ControlPlaneExtraArgs `json:"extraArgs,omitempty"`
|
||||
AdditionalMetadata AdditionalMetadata `json:"additionalMetadata,omitempty"`
|
||||
// such as kube-apiserver, controller-manager, and scheduler. WARNING - This option
|
||||
// can override existing parameters and cause components to misbehave in unexpected ways.
|
||||
// Only modify if you know what you are doing.
|
||||
ExtraArgs *ControlPlaneExtraArgs `json:"extraArgs,omitempty"`
|
||||
AdditionalMetadata AdditionalMetadata `json:"additionalMetadata,omitempty"`
|
||||
PodAdditionalMetadata AdditionalMetadata `json:"podAdditionalMetadata,omitempty"`
|
||||
// AdditionalInitContainers allows adding additional init containers to the Control Plane deployment.
|
||||
AdditionalInitContainers []corev1.Container `json:"additionalInitContainers,omitempty"`
|
||||
// AdditionalContainers allows adding additional containers to the Control Plane deployment.
|
||||
AdditionalContainers []corev1.Container `json:"additionalContainers,omitempty"`
|
||||
// AdditionalVolumes allows to add additional volumes to the Control Plane deployment.
|
||||
AdditionalVolumes []corev1.Volume `json:"additionalVolumes,omitempty"`
|
||||
// AdditionalVolumeMounts allows to mount an additional volume into each component of the Control Plane
|
||||
// (kube-apiserver, controller-manager, and scheduler).
|
||||
AdditionalVolumeMounts *AdditionalVolumeMounts `json:"additionalVolumeMounts,omitempty"`
|
||||
//+kubebuilder:default="default"
|
||||
// ServiceAccountName allows to specify the service account to be mounted to the pods of the Control plane deployment
|
||||
ServiceAccountName string `json:"serviceAccountName,omitempty"`
|
||||
}
|
||||
|
||||
// AdditionalVolumeMounts allows mounting additional volumes to the Control Plane components.
|
||||
type AdditionalVolumeMounts struct {
|
||||
APIServer []corev1.VolumeMount `json:"apiServer,omitempty"`
|
||||
ControllerManager []corev1.VolumeMount `json:"controllerManager,omitempty"`
|
||||
Scheduler []corev1.VolumeMount `json:"scheduler,omitempty"`
|
||||
}
|
||||
|
||||
// ControlPlaneExtraArgs allows specifying additional arguments to the Control Plane components.
|
||||
@@ -159,6 +248,9 @@ type ControlPlaneExtraArgs struct {
|
||||
|
||||
type ServiceSpec struct {
|
||||
AdditionalMetadata AdditionalMetadata `json:"additionalMetadata,omitempty"`
|
||||
// AdditionalPorts allows adding additional ports to the Service generated by Kamaji
|
||||
// which targets the Tenant Control Plane pods.
|
||||
AdditionalPorts []AdditionalPort `json:"additionalPorts,omitempty"`
|
||||
// ServiceType allows specifying how to expose the Tenant Control Plane.
|
||||
ServiceType ServiceType `json:"serviceType"`
|
||||
}
|
||||
@@ -178,37 +270,71 @@ type ImageOverrideTrait struct {
|
||||
}
|
||||
|
||||
// ExtraArgs allows adding additional arguments to said component.
|
||||
// WARNING - This option can override existing konnectivity
|
||||
// parameters and cause konnectivity components to misbehave in
|
||||
// unexpected ways. Only modify if you know what you are doing.
|
||||
type ExtraArgs []string
|
||||
|
||||
type KonnectivityServerSpec struct {
|
||||
// The port which Konnectivity server is listening to.
|
||||
Port int32 `json:"port"`
|
||||
// Container image version of the Konnectivity server.
|
||||
// +kubebuilder:default=v0.0.32
|
||||
// If left empty, Kamaji will automatically infer the version from the deployed Tenant Control Plane.
|
||||
//
|
||||
// WARNING: for the most recent cut-off releases, the container image might not be available yet.
|
||||
Version string `json:"version,omitempty"`
|
||||
// Container image used by the Konnectivity server.
|
||||
// +kubebuilder:default=registry.k8s.io/kas-network-proxy/proxy-server
|
||||
//+kubebuilder:default=registry.k8s.io/kas-network-proxy/proxy-server
|
||||
Image string `json:"image,omitempty"`
|
||||
// Resources define the amount of CPU and memory to allocate to the Konnectivity server.
|
||||
Resources *ComponentResourceRequirements `json:"resources,omitempty"`
|
||||
ExtraArgs ExtraArgs `json:"extraArgs,omitempty"`
|
||||
Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
|
||||
ExtraArgs ExtraArgs `json:"extraArgs,omitempty"`
|
||||
}
|
||||
|
||||
type KonnectivityAgentMode string
|
||||
|
||||
var (
|
||||
KonnectivityAgentModeDaemonSet KonnectivityAgentMode = "DaemonSet"
|
||||
KonnectivityAgentModeDeployment KonnectivityAgentMode = "Deployment"
|
||||
)
|
||||
|
||||
//+kubebuilder:validation:XValidation:rule="!(self.mode == 'DaemonSet' && has(self.replicas) && self.replicas != 0) && !(self.mode == 'Deployment' && has(self.replicas) && self.replicas == 0)",message="replicas must be 0 (or unset) when mode is DaemonSet, and greater than 0 (or unset) when mode is Deployment"
|
||||
|
||||
type KonnectivityAgentSpec struct {
|
||||
// AgentImage defines the container image for Konnectivity's agent.
|
||||
// +kubebuilder:default=registry.k8s.io/kas-network-proxy/proxy-agent
|
||||
//+kubebuilder:default=registry.k8s.io/kas-network-proxy/proxy-agent
|
||||
Image string `json:"image,omitempty"`
|
||||
// Version for Konnectivity agent.
|
||||
// +kubebuilder:default=v0.0.32
|
||||
Version string `json:"version,omitempty"`
|
||||
ExtraArgs ExtraArgs `json:"extraArgs,omitempty"`
|
||||
// If left empty, Kamaji will automatically infer the version from the deployed Tenant Control Plane.
|
||||
//
|
||||
// WARNING: for the most recent cut-off releases, the container image might not be available yet.
|
||||
Version string `json:"version,omitempty"`
|
||||
// Tolerations for the deployed agent.
|
||||
// Can be customized to start the konnectivity-agent even if the nodes are not ready or tainted.
|
||||
//+kubebuilder:default={{key: "CriticalAddonsOnly", operator: "Exists"}}
|
||||
Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
|
||||
ExtraArgs ExtraArgs `json:"extraArgs,omitempty"`
|
||||
// HostNetwork enables the konnectivity agent to use the Host network namespace.
|
||||
// By enabling this mode, the Agent doesn't need to wait for the CNI initialisation,
|
||||
// enabling a sort of out-of-band access to nodes for troubleshooting scenarios,
|
||||
// or when the agent needs direct access to the host network.
|
||||
//+kubebuilder:default=false
|
||||
HostNetwork bool `json:"hostNetwork,omitempty"`
|
||||
// Mode allows specifying the Agent deployment mode: Deployment, or DaemonSet (default).
|
||||
//+kubebuilder:default="DaemonSet"
|
||||
//+kubebuilder:validation:Enum=DaemonSet;Deployment
|
||||
Mode KonnectivityAgentMode `json:"mode,omitempty"`
|
||||
// Replicas defines the number of replicas when Mode is Deployment.
|
||||
// Must be 0 if Mode is DaemonSet.
|
||||
//+kubebuilder:validation:Optional
|
||||
Replicas *int32 `json:"replicas,omitempty"`
|
||||
}
|
||||
|
||||
// KonnectivitySpec defines the spec for Konnectivity.
|
||||
type KonnectivitySpec struct {
|
||||
// +kubebuilder:default={version:"v0.0.32",image:"registry.k8s.io/kas-network-proxy/proxy-server",port:8132}
|
||||
//+kubebuilder:default={image:"registry.k8s.io/kas-network-proxy/proxy-server",port:8132}
|
||||
KonnectivityServerSpec KonnectivityServerSpec `json:"server,omitempty"`
|
||||
// +kubebuilder:default={version:"v0.0.32",image:"registry.k8s.io/kas-network-proxy/proxy-agent"}
|
||||
//+kubebuilder:default={image:"registry.k8s.io/kas-network-proxy/proxy-agent",mode:"DaemonSet"}
|
||||
KonnectivityAgentSpec KonnectivityAgentSpec `json:"agent,omitempty"`
|
||||
}
|
||||
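To make the mode/replicas rule above concrete, here is an illustrative agent spec that passes it: Deployment mode with a positive replica count, whereas DaemonSet mode would leave replicas unset or zero. Fragment, package scope assumed:

```go
agentReplicas := int32(2)
konnectivity := KonnectivitySpec{
	KonnectivityServerSpec: KonnectivityServerSpec{Port: 8132},
	KonnectivityAgentSpec: KonnectivityAgentSpec{
		Mode:     KonnectivityAgentModeDeployment,
		Replicas: &agentReplicas,
	},
}
_ = konnectivity
```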
|
||||
@@ -224,13 +350,66 @@ type AddonsSpec struct {
|
||||
KubeProxy *AddonSpec `json:"kubeProxy,omitempty"`
|
||||
}
|
||||
|
||||
type Permissions struct {
|
||||
BlockCreate bool `json:"blockCreation,omitempty"`
|
||||
BlockUpdate bool `json:"blockUpdate,omitempty"`
|
||||
BlockDelete bool `json:"blockDeletion,omitempty"`
|
||||
}
|
||||
|
||||
func (p *Permissions) HasAnyLimitation() bool {
|
||||
if p.BlockCreate || p.BlockUpdate || p.BlockDelete {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
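A quick sketch of the intended use: blocking creations and updates while still allowing deletions, so a tenant can reclaim Datastore space. Values are illustrative; the fragment assumes package scope and an `fmt` import:

```go
p := Permissions{BlockCreate: true, BlockUpdate: true}
fmt.Println(p.HasAnyLimitation()) // true: the Tenant Control Plane becomes write-limited
```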
|
||||
// DataStoreOverride defines which kubernetes resource will be stored in a dedicated datastore.
|
||||
type DataStoreOverride struct {
|
||||
// Resource specifies which kubernetes resource to target.
|
||||
Resource string `json:"resource,omitempty"`
|
||||
// DataStore specifies the DataStore that should be used to store the Kubernetes data for the given Resource.
|
||||
DataStore string `json:"dataStore,omitempty"`
|
||||
}
|
||||
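For example, a chatty resource can be routed to its own DataStore while everything else stays on the TenantControlPlane default; the resource and DataStore names below are hypothetical:

```go
overrides := []DataStoreOverride{
	{Resource: "events", DataStore: "etcd-events"},
}
_ = overrides
```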
|
||||
// TenantControlPlaneSpec defines the desired state of TenantControlPlane.
|
||||
// +kubebuilder:validation:XValidation:rule="!has(oldSelf.dataStore) || has(self.dataStore)", message="unsetting the dataStore is not supported"
|
||||
// +kubebuilder:validation:XValidation:rule="!has(oldSelf.dataStoreSchema) || has(self.dataStoreSchema)", message="unsetting the dataStoreSchema is not supported"
|
||||
// +kubebuilder:validation:XValidation:rule="!has(oldSelf.dataStoreUsername) || has(self.dataStoreUsername)", message="unsetting the dataStoreUsername is not supported"
|
||||
// +kubebuilder:validation:XValidation:rule="!has(self.networkProfile.loadBalancerSourceRanges) || (size(self.networkProfile.loadBalancerSourceRanges) == 0 || self.controlPlane.service.serviceType == 'LoadBalancer')", message="LoadBalancer source ranges are supported only with LoadBalancer service type"
|
||||
// +kubebuilder:validation:XValidation:rule="!has(self.networkProfile.loadBalancerClass) || self.controlPlane.service.serviceType == 'LoadBalancer'", message="LoadBalancerClass is supported only with LoadBalancer service type"
|
||||
// +kubebuilder:validation:XValidation:rule="self.controlPlane.service.serviceType != 'LoadBalancer' || (oldSelf.controlPlane.service.serviceType != 'LoadBalancer' && self.controlPlane.service.serviceType == 'LoadBalancer') || has(self.networkProfile.loadBalancerClass) == has(oldSelf.networkProfile.loadBalancerClass)",message="LoadBalancerClass cannot be set or unset at runtime"
|
||||
|
||||
type TenantControlPlaneSpec struct {
|
||||
// DataStore allows to specify a DataStore that should be used to store the Kubernetes data for the given Tenant Control Plane.
|
||||
// This parameter is optional and acts as an override over the default one which is used by the Kamaji Operator.
|
||||
// Migration from a different DataStore to another one is not yet supported and the reconciliation will be blocked.
|
||||
DataStore string `json:"dataStore,omitempty"`
|
||||
ControlPlane ControlPlane `json:"controlPlane"`
|
||||
// WritePermissions allows to select which operations (create, delete, update) must be blocked:
|
||||
// by default, all actions are allowed, and API Server can write to its Datastore.
|
||||
//
|
||||
// By blocking all actions, the Tenant Control Plane can enter a Read Only mode:
|
||||
// this phase can be used to prevent Datastore quota exhaustion or for your own business logic
|
||||
// (e.g.: blocking creation and update, but allowing deletion to "clean up" space).
|
||||
WritePermissions Permissions `json:"writePermissions,omitempty"`
|
||||
// DataStore specifies the DataStore that should be used to store the Kubernetes data for the given Tenant Control Plane.
|
||||
// When Kamaji runs with the default DataStore flag, all empty values will inherit the default value.
|
||||
// By leaving it empty and running Kamaji with no default DataStore flag, it is possible to achieve automatic assignment to a specific DataStore object.
|
||||
//
|
||||
// Migration from one DataStore to another backed by the same Driver is possible. See: https://kamaji.clastix.io/guides/datastore-migration/
|
||||
// Migration from one DataStore to another backed by a different Driver is not supported.
|
||||
DataStore string `json:"dataStore,omitempty"`
|
||||
// DataStoreSchema allows to specify the name of the database (for relational DataStores) or the key prefix (for etcd). This
|
||||
// value is optional and immutable. Note that Kamaji currently doesn't ensure that DataStoreSchema values are unique. It's up
|
||||
// to the user to avoid clashes between different TenantControlPlanes. If not set upon creation, Kamaji will default the
|
||||
// DataStoreSchema by concatenating the namespace and name of the TenantControlPlane.
|
||||
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="changing the dataStoreSchema is not supported"
|
||||
DataStoreSchema string `json:"dataStoreSchema,omitempty"`
|
||||
// DataStoreUsername allows to specify the username of the database (for relational DataStores). This
|
||||
// value is optional and immutable. Note that Kamaji currently doesn't ensure that DataStoreUsername values are unique. It's up
|
||||
// to the user to avoid clashes between different TenantControlPlanes. If not set upon creation, Kamaji will default the
|
||||
// DataStoreUsername by concatenating the namespace and name of the TenantControlPlane.
|
||||
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="changing the dataStoreUsername is not supported"
|
||||
DataStoreUsername string `json:"dataStoreUsername,omitempty"`
|
||||
// DataStoreOverride defines which kubernetes resources will be stored in dedicated datastores.
|
||||
DataStoreOverrides []DataStoreOverride `json:"dataStoreOverrides,omitempty"`
|
||||
ControlPlane ControlPlane `json:"controlPlane"`
|
||||
// Kubernetes specification for tenant control plane
|
||||
Kubernetes KubernetesSpec `json:"kubernetes"`
|
||||
// NetworkProfile specifies how the network is
|
||||
@@ -239,16 +418,18 @@ type TenantControlPlaneSpec struct {
|
||||
Addons AddonsSpec `json:"addons,omitempty"`
|
||||
}
|
||||
|
||||
// +kubebuilder:object:root=true
|
||||
// +kubebuilder:subresource:status
|
||||
// +kubebuilder:subresource:scale:specpath=.spec.controlPlane.deployment.replicas,statuspath=.status.kubernetesResources.deployment.replicas,selectorpath=.status.kubernetesResources.deployment.selector
|
||||
// +kubebuilder:resource:shortName=tcp
|
||||
// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.kubernetes.version",description="Kubernetes version"
|
||||
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.kubernetesResources.version.status",description="Status"
|
||||
// +kubebuilder:printcolumn:name="Control-Plane endpoint",type="string",JSONPath=".status.controlPlaneEndpoint",description="Tenant Control Plane Endpoint (API server)"
|
||||
// +kubebuilder:printcolumn:name="Kubeconfig",type="string",JSONPath=".status.kubeconfig.admin.secretName",description="Secret which contains admin kubeconfig"
|
||||
//+kubebuilder:object:root=true
|
||||
//+kubebuilder:subresource:status
|
||||
//+kubebuilder:subresource:scale:specpath=.spec.controlPlane.deployment.replicas,statuspath=.status.kubernetesResources.deployment.replicas,selectorpath=.status.kubernetesResources.deployment.selector
|
||||
//+kubebuilder:resource:categories=kamaji,shortName=tcp
|
||||
//+kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.kubernetes.version",description="Kubernetes version"
|
||||
//+kubebuilder:printcolumn:name="Installed Version",type="string",JSONPath=".status.kubernetesResources.version.version",description="The actual installed Kubernetes version from status"
|
||||
//+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.kubernetesResources.version.status",description="Status"
|
||||
//+kubebuilder:printcolumn:name="Control-Plane endpoint",type="string",JSONPath=".status.controlPlaneEndpoint",description="Tenant Control Plane Endpoint (API server)"
|
||||
//+kubebuilder:printcolumn:name="Kubeconfig",type="string",JSONPath=".status.kubeconfig.admin.secretName",description="Secret which contains admin kubeconfig"
|
||||
//+kubebuilder:printcolumn:name="Datastore",type="string",JSONPath=".status.storage.dataStoreName",description="DataStore actually used"
|
||||
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age"
|
||||
//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age"
|
||||
//+kubebuilder:metadata:annotations={"cert-manager.io/inject-ca-from=kamaji-system/kamaji-serving-cert"}
|
||||
|
||||
// TenantControlPlane is the Schema for the tenantcontrolplanes API.
|
||||
type TenantControlPlane struct {
|
||||
|
||||
78
api/v1alpha1/tenantcontrolplane_types_test.go
Normal file
@@ -0,0 +1,78 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
var _ = Describe("Cluster controller", func() {
|
||||
var (
|
||||
ctx context.Context
|
||||
tcp *TenantControlPlane
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx = context.Background()
|
||||
tcp = &TenantControlPlane{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "tcp",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: TenantControlPlaneSpec{},
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
if err := k8sClient.Delete(ctx, tcp); err != nil && !apierrors.IsNotFound(err) {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
})
|
||||
|
||||
Context("LoadBalancer Source Ranges", func() {
|
||||
It("allows creation when no CIDR ranges are provided", func() {
|
||||
tcp.Spec.ControlPlane.Service.ServiceType = ServiceTypeLoadBalancer
|
||||
|
||||
err := k8sClient.Create(ctx, tcp)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("allows creation with an explicitly empty CIDR list", func() {
|
||||
tcp.Spec.ControlPlane.Service.ServiceType = ServiceTypeLoadBalancer
|
||||
tcp.Spec.NetworkProfile.LoadBalancerSourceRanges = []string{}
|
||||
|
||||
err := k8sClient.Create(ctx, tcp)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("allows creation when service type is not LoadBalancer and it has an empty CIDR list", func() {
|
||||
tcp.Spec.ControlPlane.Service.ServiceType = ServiceTypeNodePort
|
||||
|
||||
err := k8sClient.Create(ctx, tcp)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("allows CIDR ranges when service type is LoadBalancer", func() {
|
||||
tcp.Spec.ControlPlane.Service.ServiceType = ServiceTypeLoadBalancer
|
||||
tcp.Spec.NetworkProfile.LoadBalancerSourceRanges = []string{"192.168.0.0/24"}
|
||||
|
||||
err := k8sClient.Create(ctx, tcp)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("denies CIDR ranges when service type is not LoadBalancer", func() {
|
||||
tcp.Spec.ControlPlane.Service.ServiceType = ServiceTypeNodePort
|
||||
tcp.Spec.NetworkProfile.LoadBalancerSourceRanges = []string{"192.168.0.0/24"}
|
||||
|
||||
err := k8sClient.Create(ctx, tcp)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("LoadBalancer source ranges are supported only with LoadBalancer service type"))
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,188 +0,0 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/clastix/kamaji/internal/upgrade"
|
||||
)
|
||||
|
||||
//+kubebuilder:webhook:path=/mutate-kamaji-clastix-io-v1alpha1-tenantcontrolplane,mutating=true,failurePolicy=fail,sideEffects=None,groups=kamaji.clastix.io,resources=tenantcontrolplanes,verbs=create;update,versions=v1alpha1,name=mtenantcontrolplane.kb.io,admissionReviewVersions=v1
|
||||
//+kubebuilder:webhook:path=/validate-kamaji-clastix-io-v1alpha1-tenantcontrolplane,mutating=false,failurePolicy=fail,sideEffects=None,groups=kamaji.clastix.io,resources=tenantcontrolplanes,verbs=create;update,versions=v1alpha1,name=vtenantcontrolplane.kb.io,admissionReviewVersions=v1
|
||||
|
||||
func (in *TenantControlPlane) SetupWebhookWithManager(mgr ctrl.Manager, datastore string) error {
|
||||
validator := &tenantControlPlaneValidator{
|
||||
client: mgr.GetClient(),
|
||||
defaultDatastore: datastore,
|
||||
log: mgr.GetLogger().WithName("tenantcontrolplane-webhook"),
|
||||
}
|
||||
|
||||
return ctrl.NewWebhookManagedBy(mgr).
|
||||
For(in).
|
||||
WithValidator(validator).
|
||||
WithDefaulter(validator).
|
||||
Complete()
|
||||
}
|
||||
|
||||
type tenantControlPlaneValidator struct {
|
||||
client client.Client
|
||||
defaultDatastore string
|
||||
log logr.Logger
|
||||
}
|
||||
|
||||
func (t *tenantControlPlaneValidator) Default(_ context.Context, obj runtime.Object) error {
|
||||
tcp, ok := obj.(*TenantControlPlane)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected *kamajiv1alpha1.TenantControlPlane")
|
||||
}
|
||||
|
||||
if len(tcp.Spec.DataStore) == 0 {
|
||||
tcp.Spec.DataStore = t.defaultDatastore
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tenantControlPlaneValidator) ValidateCreate(_ context.Context, obj runtime.Object) error {
|
||||
tcp, ok := obj.(*TenantControlPlane)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected *kamajiv1alpha1.TenantControlPlane")
|
||||
}
|
||||
|
||||
t.log.Info("validate create", "name", tcp.Name, "namespace", tcp.Namespace)
|
||||
|
||||
ver, err := semver.New(t.normalizeKubernetesVersion(tcp.Spec.Kubernetes.Version))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to parse the desired Kubernetes version")
|
||||
}
|
||||
|
||||
supportedVer, supportedErr := semver.Make(t.normalizeKubernetesVersion(upgrade.KubeadmVersion))
|
||||
if supportedErr != nil {
|
||||
return errors.Wrap(supportedErr, "unable to parse the Kamaji supported Kubernetes version")
|
||||
}
|
||||
|
||||
if ver.GT(supportedVer) {
|
||||
return fmt.Errorf("unable to create a TenantControlPlane with a Kubernetes version greater than the supported one, actually %s", supportedVer.String())
|
||||
}
|
||||
|
||||
if err = t.validatePreferredKubeletAddressTypes(tcp.Spec.Kubernetes.Kubelet.PreferredAddressTypes); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tenantControlPlaneValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error {
|
||||
old, ok := oldObj.(*TenantControlPlane)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected *kamajiv1alpha1.TenantControlPlane")
|
||||
}
|
||||
|
||||
tcp, ok := newObj.(*TenantControlPlane)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected *kamajiv1alpha1.TenantControlPlane")
|
||||
}
|
||||
|
||||
t.log.Info("validate update", "name", tcp.Name, "namespace", tcp.Namespace)
|
||||
|
||||
if err := t.validateVersionUpdate(old, tcp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := t.validateDataStore(ctx, old, tcp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := t.validatePreferredKubeletAddressTypes(tcp.Spec.Kubernetes.Kubelet.PreferredAddressTypes); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tenantControlPlaneValidator) ValidateDelete(context.Context, runtime.Object) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tenantControlPlaneValidator) validatePreferredKubeletAddressTypes(addressTypes []KubeletPreferredAddressType) error {
|
||||
s := sets.NewString()
|
||||
|
||||
for _, at := range addressTypes {
|
||||
if s.Has(string(at)) {
|
||||
return fmt.Errorf("preferred kubelet address types is stated multiple times: %s", at)
|
||||
}
|
||||
|
||||
s.Insert(string(at))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tenantControlPlaneValidator) validateVersionUpdate(oldObj, newObj *TenantControlPlane) error {
|
||||
oldVer, oldErr := semver.Make(t.normalizeKubernetesVersion(oldObj.Spec.Kubernetes.Version))
|
||||
if oldErr != nil {
|
||||
return errors.Wrap(oldErr, "unable to parse the previous Kubernetes version")
|
||||
}
|
||||
|
||||
newVer, newErr := semver.New(t.normalizeKubernetesVersion(newObj.Spec.Kubernetes.Version))
|
||||
if newErr != nil {
|
||||
return errors.Wrap(newErr, "unable to parse the desired Kubernetes version")
|
||||
}
|
||||
|
||||
supportedVer, supportedErr := semver.Make(t.normalizeKubernetesVersion(upgrade.KubeadmVersion))
|
||||
if supportedErr != nil {
|
||||
return errors.Wrap(supportedErr, "unable to parse the Kamaji supported Kubernetes version")
|
||||
}
|
||||
|
||||
switch {
|
||||
case newVer.GT(supportedVer):
|
||||
return fmt.Errorf("unable to upgrade to a version greater than the supported one, actually %s", supportedVer.String())
|
||||
case newVer.LT(oldVer):
|
||||
return fmt.Errorf("unable to downgrade a TenantControlPlane from %s to %s", oldVer.String(), newVer.String())
|
||||
case newVer.Minor-oldVer.Minor > 1:
|
||||
return fmt.Errorf("unable to upgrade to a minor version in a non-sequential mode")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tenantControlPlaneValidator) validateDataStore(ctx context.Context, oldObj, tcp *TenantControlPlane) error {
|
||||
if oldObj.Spec.DataStore == tcp.Spec.DataStore {
|
||||
return nil
|
||||
}
|
||||
|
||||
previousDatastore, desiredDatastore := &DataStore{}, &DataStore{}
|
||||
|
||||
if err := t.client.Get(ctx, types.NamespacedName{Name: oldObj.Spec.DataStore}, previousDatastore); err != nil {
|
||||
return fmt.Errorf("unable to retrieve old DataStore for validation: %w", err)
|
||||
}
|
||||
|
||||
if err := t.client.Get(ctx, types.NamespacedName{Name: tcp.Spec.DataStore}, desiredDatastore); err != nil {
|
||||
return fmt.Errorf("unable to retrieve old DataStore for validation: %w", err)
|
||||
}
|
||||
|
||||
if previousDatastore.Spec.Driver != desiredDatastore.Spec.Driver {
|
||||
return fmt.Errorf("migration between different Datastore drivers is not supported")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tenantControlPlaneValidator) normalizeKubernetesVersion(input string) string {
|
||||
if strings.HasPrefix(input, "v") {
|
||||
return strings.Replace(input, "v", "", 1)
|
||||
}
|
||||
|
||||
return input
|
||||
}
|
||||
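The version gate in the removed validator boils down to three semver comparisons; a sketch using github.com/blang/semver with invented version numbers, assuming an `fmt` import:

```go
current, _ := semver.Make("1.26.3")
desired, _ := semver.Make("1.28.0")

fmt.Println(desired.LT(current))             // false: not a downgrade
fmt.Println(desired.Minor-current.Minor > 1) // true: skips v1.27, so the update would be rejected
```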
@@ -28,9 +28,10 @@ func (c CGroupDriver) String() string {
|
||||
}
|
||||
|
||||
const (
|
||||
ServiceTypeLoadBalancer = (ServiceType)(corev1.ServiceTypeLoadBalancer)
|
||||
ServiceTypeClusterIP = (ServiceType)(corev1.ServiceTypeClusterIP)
|
||||
ServiceTypeNodePort = (ServiceType)(corev1.ServiceTypeNodePort)
|
||||
ServiceTypeLoadBalancer = (ServiceType)(corev1.ServiceTypeLoadBalancer)
|
||||
ServiceTypeClusterIP = (ServiceType)(corev1.ServiceTypeClusterIP)
|
||||
ServiceTypeNodePort = (ServiceType)(corev1.ServiceTypeNodePort)
|
||||
KubeconfigSecretKeyAnnotation = "kamaji.clastix.io/kubeconfig-secret-key"
|
||||
)
|
||||
|
||||
// +kubebuilder:validation:Enum=ClusterIP;NodePort;LoadBalancer
|
||||
|
||||
177
api/v1alpha1/validations_test.go
Normal file
@@ -0,0 +1,177 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
var _ = Describe("Datastores validation test", func() {
|
||||
var (
|
||||
ctx context.Context
|
||||
ds *DataStore
|
||||
)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx = context.Background()
|
||||
ds = &DataStore{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "ds",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: DataStoreSpec{},
|
||||
}
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
if err := k8sClient.Delete(ctx, ds); err != nil && !apierrors.IsNotFound(err) {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
})
|
||||
|
||||
Context("DataStores fields", func() {
|
||||
It("datastores of type ETCD must have their TLS configurations set correctly", func() {
|
||||
ds = &DataStore{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "bad-etcd",
|
||||
},
|
||||
Spec: DataStoreSpec{
|
||||
Driver: "etcd",
|
||||
Endpoints: []string{"etcd-server:2379"},
|
||||
TLSConfig: &TLSConfig{
|
||||
CertificateAuthority: CertKeyPair{},
|
||||
ClientCertificate: &ClientCertificate{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, ds)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("certificateAuthority privateKey must have secretReference or content when driver is etcd"))
|
||||
})
|
||||
|
||||
It("valid ETCD DataStore should be created", func() {
|
||||
var (
|
||||
cert = []byte("cert")
|
||||
key = []byte("privkey")
|
||||
)
|
||||
|
||||
ds = &DataStore{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "good-etcd",
|
||||
},
|
||||
Spec: DataStoreSpec{
|
||||
Driver: "etcd",
|
||||
Endpoints: []string{"etcd-server:2379"},
|
||||
TLSConfig: &TLSConfig{
|
||||
CertificateAuthority: CertKeyPair{
|
||||
Certificate: ContentRef{
|
||||
Content: cert,
|
||||
},
|
||||
PrivateKey: &ContentRef{
|
||||
Content: key,
|
||||
},
|
||||
},
|
||||
ClientCertificate: &ClientCertificate{
|
||||
Certificate: ContentRef{
|
||||
Content: cert,
|
||||
},
|
||||
PrivateKey: ContentRef{
|
||||
Content: key,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, ds)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
})
|
||||
|
||||
It("datastores of type PostgreSQL must have either basicAuth or tlsConfig", func() {
|
||||
ds = &DataStore{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "bad-pg",
|
||||
},
|
||||
Spec: DataStoreSpec{
|
||||
Driver: "PostgreSQL",
|
||||
Endpoints: []string{"pg-server:5432"},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, ds)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("When driver is not etcd, either tlsConfig or basicAuth must be provided"))
|
||||
})
|
||||
|
||||
It("datastores of type PostgreSQL can have basicAuth", func() {
|
||||
ds = &DataStore{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "good-pg",
|
||||
},
|
||||
Spec: DataStoreSpec{
|
||||
Driver: "PostgreSQL",
|
||||
Endpoints: []string{"pg-server:5432"},
|
||||
BasicAuth: &BasicAuth{
|
||||
Username: ContentRef{
|
||||
Content: []byte("postgres"),
|
||||
},
|
||||
Password: ContentRef{
|
||||
Content: []byte("postgres"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, ds)
|
||||
Expect(err).To(Not(HaveOccurred()))
|
||||
})
|
||||
|
||||
It("datastores of type PostgreSQL must have tlsConfig with proper content", func() {
|
||||
ds = &DataStore{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "bad-pg",
|
||||
},
|
||||
Spec: DataStoreSpec{
|
||||
Driver: "PostgreSQL",
|
||||
Endpoints: []string{"pg-server:5432"},
|
||||
TLSConfig: &TLSConfig{
|
||||
ClientCertificate: &ClientCertificate{},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(context.Background(), ds)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("When driver is not etcd and tlsConfig exists, clientCertificate must be null or contain valid content"))
|
||||
})
|
||||
|
||||
It("datastores of type PostgreSQL need a proper clientCertificate", func() {
|
||||
ds = &DataStore{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "good-pg",
|
||||
},
|
||||
Spec: DataStoreSpec{
|
||||
Driver: "PostgreSQL",
|
||||
Endpoints: []string{"pg-server:5432"},
|
||||
TLSConfig: &TLSConfig{
|
||||
ClientCertificate: &ClientCertificate{
|
||||
Certificate: ContentRef{
|
||||
Content: []byte("cert"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(context.Background(), ds)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -1,123 +0,0 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
admissionv1beta1 "k8s.io/api/admission/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/rest"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/envtest"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
//+kubebuilder:scaffold:imports
|
||||
)
|
||||
|
||||
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
|
||||
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
|
||||
|
||||
var (
|
||||
cfg *rest.Config
|
||||
k8sClient client.Client
|
||||
testEnv *envtest.Environment
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
)
|
||||
|
||||
func TestAPIs(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
|
||||
RunSpecs(t, "Webhook Suite")
|
||||
}
|
||||
|
||||
var _ = BeforeSuite(func() {
|
||||
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
|
||||
|
||||
ctx, cancel = context.WithCancel(context.TODO())
|
||||
|
||||
By("bootstrapping test environment")
|
||||
testEnv = &envtest.Environment{
|
||||
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
|
||||
ErrorIfCRDPathMissing: false,
|
||||
WebhookInstallOptions: envtest.WebhookInstallOptions{
|
||||
Paths: []string{filepath.Join("..", "..", "config", "webhook")},
|
||||
},
|
||||
}
|
||||
|
||||
var err error
|
||||
// cfg is defined in this file globally.
|
||||
cfg, err = testEnv.Start()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(cfg).NotTo(BeNil())
|
||||
|
||||
scheme := runtime.NewScheme()
|
||||
err = AddToScheme(scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = admissionv1beta1.AddToScheme(scheme)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
//+kubebuilder:scaffold:scheme
|
||||
|
||||
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(k8sClient).NotTo(BeNil())
|
||||
|
||||
// start webhook server using Manager
|
||||
webhookInstallOptions := &testEnv.WebhookInstallOptions
|
||||
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
|
||||
Scheme: scheme,
|
||||
Host: webhookInstallOptions.LocalServingHost,
|
||||
Port: webhookInstallOptions.LocalServingPort,
|
||||
CertDir: webhookInstallOptions.LocalServingCertDir,
|
||||
LeaderElection: false,
|
||||
MetricsBindAddress: "0",
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = (&TenantControlPlane{}).SetupWebhookWithManager(mgr, "")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = (&DataStore{}).SetupWebhookWithManager(mgr)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
//+kubebuilder:scaffold:webhook
|
||||
|
||||
go func() {
|
||||
defer GinkgoRecover()
|
||||
err = mgr.Start(ctx)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}()
|
||||
|
||||
// wait for the webhook server to get ready
|
||||
dialer := &net.Dialer{Timeout: time.Second}
|
||||
addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort)
|
||||
Eventually(func() error {
|
||||
conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
conn.Close()
|
||||
|
||||
return nil
|
||||
}).Should(Succeed())
|
||||
})
|
||||
|
||||
var _ = AfterSuite(func() {
|
||||
cancel()
|
||||
By("tearing down the test environment")
|
||||
err := testEnv.Stop()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
@@ -9,8 +8,10 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
apisv1 "sigs.k8s.io/gateway-api/apis/v1"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
@@ -58,6 +59,63 @@ func (in *AdditionalMetadata) DeepCopy() *AdditionalMetadata {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AdditionalPort) DeepCopyInto(out *AdditionalPort) {
|
||||
*out = *in
|
||||
if in.AppProtocol != nil {
|
||||
in, out := &in.AppProtocol, &out.AppProtocol
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
out.TargetPort = in.TargetPort
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalPort.
|
||||
func (in *AdditionalPort) DeepCopy() *AdditionalPort {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AdditionalPort)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AdditionalVolumeMounts) DeepCopyInto(out *AdditionalVolumeMounts) {
|
||||
*out = *in
|
||||
if in.APIServer != nil {
|
||||
in, out := &in.APIServer, &out.APIServer
|
||||
*out = make([]corev1.VolumeMount, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.ControllerManager != nil {
|
||||
in, out := &in.ControllerManager, &out.ControllerManager
|
||||
*out = make([]corev1.VolumeMount, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Scheduler != nil {
|
||||
in, out := &in.Scheduler, &out.Scheduler
|
||||
*out = make([]corev1.VolumeMount, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalVolumeMounts.
|
||||
func (in *AdditionalVolumeMounts) DeepCopy() *AdditionalVolumeMounts {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AdditionalVolumeMounts)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AddonSpec) DeepCopyInto(out *AddonSpec) {
|
||||
*out = *in
|
||||
@@ -255,30 +313,16 @@ func (in *ClientCertificate) DeepCopy() *ClientCertificate {
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ComponentResourceRequirements) DeepCopyInto(out *ComponentResourceRequirements) {
|
||||
func (in *CompoundValue) DeepCopyInto(out *CompoundValue) {
|
||||
*out = *in
|
||||
if in.Limits != nil {
|
||||
in, out := &in.Limits, &out.Limits
|
||||
*out = make(v1.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
if in.Requests != nil {
|
||||
in, out := &in.Requests, &out.Requests
|
||||
*out = make(v1.ResourceList, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val.DeepCopy()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentResourceRequirements.
|
||||
func (in *ComponentResourceRequirements) DeepCopy() *ComponentResourceRequirements {
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CompoundValue.
|
||||
func (in *CompoundValue) DeepCopy() *CompoundValue {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ComponentResourceRequirements)
|
||||
out := new(CompoundValue)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
@@ -318,6 +362,11 @@ func (in *ControlPlane) DeepCopyInto(out *ControlPlane) {
|
||||
*out = new(IngressSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Gateway != nil {
|
||||
in, out := &in.Gateway, &out.Gateway
|
||||
*out = new(GatewaySpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlane.
|
||||
@@ -335,17 +384,22 @@ func (in *ControlPlaneComponentsResources) DeepCopyInto(out *ControlPlaneCompone
|
||||
*out = *in
|
||||
if in.APIServer != nil {
|
||||
in, out := &in.APIServer, &out.APIServer
|
||||
*out = new(ComponentResourceRequirements)
|
||||
*out = new(corev1.ResourceRequirements)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ControllerManager != nil {
|
||||
in, out := &in.ControllerManager, &out.ControllerManager
|
||||
*out = new(ComponentResourceRequirements)
|
||||
*out = new(corev1.ResourceRequirements)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Scheduler != nil {
|
||||
in, out := &in.Scheduler, &out.Scheduler
|
||||
*out = new(ComponentResourceRequirements)
|
||||
*out = new(corev1.ResourceRequirements)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Kine != nil {
|
||||
in, out := &in.Kine, &out.Kine
|
||||
*out = new(corev1.ResourceRequirements)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
@@ -485,6 +539,21 @@ func (in *DataStoreList) DeepCopyObject() runtime.Object {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DataStoreOverride) DeepCopyInto(out *DataStoreOverride) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStoreOverride.
|
||||
func (in *DataStoreOverride) DeepCopy() *DataStoreOverride {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DataStoreOverride)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DataStoreSetupStatus) DeepCopyInto(out *DataStoreSetupStatus) {
|
||||
*out = *in
|
||||
@@ -514,7 +583,11 @@ func (in *DataStoreSpec) DeepCopyInto(out *DataStoreSpec) {
|
||||
*out = new(BasicAuth)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
in.TLSConfig.DeepCopyInto(&out.TLSConfig)
|
||||
if in.TLSConfig != nil {
|
||||
in, out := &in.TLSConfig, &out.TLSConfig
|
||||
*out = new(TLSConfig)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataStoreSpec.
|
||||
@@ -547,9 +620,30 @@ func (in *DataStoreStatus) DeepCopy() *DataStoreStatus {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DatastoreUsedSecret) DeepCopyInto(out *DatastoreUsedSecret) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatastoreUsedSecret.
|
||||
func (in *DatastoreUsedSecret) DeepCopy() *DatastoreUsedSecret {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(DatastoreUsedSecret)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
|
||||
*out = *in
|
||||
out.RegistrySettings = in.RegistrySettings
|
||||
if in.Replicas != nil {
|
||||
in, out := &in.Replicas, &out.Replicas
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.NodeSelector != nil {
|
||||
in, out := &in.NodeSelector, &out.NodeSelector
|
||||
*out = make(map[string]string, len(*in))
|
||||
@@ -557,21 +651,22 @@ func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
in.Strategy.DeepCopyInto(&out.Strategy)
|
||||
if in.Tolerations != nil {
|
||||
in, out := &in.Tolerations, &out.Tolerations
|
||||
*out = make([]v1.Toleration, len(*in))
|
||||
*out = make([]corev1.Toleration, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.Affinity != nil {
|
||||
in, out := &in.Affinity, &out.Affinity
|
||||
*out = new(v1.Affinity)
|
||||
*out = new(corev1.Affinity)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.TopologySpreadConstraints != nil {
|
||||
in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
|
||||
*out = make([]v1.TopologySpreadConstraint, len(*in))
|
||||
*out = make([]corev1.TopologySpreadConstraint, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
@@ -587,6 +682,33 @@ func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) {
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
in.AdditionalMetadata.DeepCopyInto(&out.AdditionalMetadata)
|
||||
in.PodAdditionalMetadata.DeepCopyInto(&out.PodAdditionalMetadata)
|
||||
if in.AdditionalInitContainers != nil {
|
||||
in, out := &in.AdditionalInitContainers, &out.AdditionalInitContainers
|
||||
*out = make([]corev1.Container, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.AdditionalContainers != nil {
|
||||
in, out := &in.AdditionalContainers, &out.AdditionalContainers
|
||||
*out = make([]corev1.Container, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.AdditionalVolumes != nil {
|
||||
in, out := &in.AdditionalVolumes, &out.AdditionalVolumes
|
||||
*out = make([]corev1.Volume, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.AdditionalVolumeMounts != nil {
|
||||
in, out := &in.AdditionalVolumeMounts, &out.AdditionalVolumeMounts
|
||||
*out = new(AdditionalVolumeMounts)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec.
|
||||
@@ -686,6 +808,69 @@ func (in ExtraArgs) DeepCopy() ExtraArgs {
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *GatewayAccessPoint) DeepCopyInto(out *GatewayAccessPoint) {
|
||||
*out = *in
|
||||
if in.Type != nil {
|
||||
in, out := &in.Type, &out.Type
|
||||
*out = new(apisv1.AddressType)
|
||||
**out = **in
|
||||
}
|
||||
if in.URLs != nil {
|
||||
in, out := &in.URLs, &out.URLs
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayAccessPoint.
|
||||
func (in *GatewayAccessPoint) DeepCopy() *GatewayAccessPoint {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(GatewayAccessPoint)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *GatewayListener) DeepCopyInto(out *GatewayListener) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewayListener.
|
||||
func (in *GatewayListener) DeepCopy() *GatewayListener {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(GatewayListener)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *GatewaySpec) DeepCopyInto(out *GatewaySpec) {
|
||||
*out = *in
|
||||
in.AdditionalMetadata.DeepCopyInto(&out.AdditionalMetadata)
|
||||
if in.GatewayParentRefs != nil {
|
||||
in, out := &in.GatewayParentRefs, &out.GatewayParentRefs
|
||||
*out = make([]apisv1.ParentReference, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatewaySpec.
|
||||
func (in *GatewaySpec) DeepCopy() *GatewaySpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(GatewaySpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ImageOverrideTrait) DeepCopyInto(out *ImageOverrideTrait) {
|
||||
*out = *in
|
||||
@@ -717,14 +902,67 @@ func (in *IngressSpec) DeepCopy() *IngressSpec {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *JSONPatch) DeepCopyInto(out *JSONPatch) {
|
||||
*out = *in
|
||||
if in.Value != nil {
|
||||
in, out := &in.Value, &out.Value
|
||||
*out = new(v1.JSON)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatch.
|
||||
func (in *JSONPatch) DeepCopy() *JSONPatch {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(JSONPatch)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in JSONPatches) DeepCopyInto(out *JSONPatches) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(JSONPatches, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatches.
|
||||
func (in JSONPatches) DeepCopy() JSONPatches {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(JSONPatches)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KonnectivityAgentSpec) DeepCopyInto(out *KonnectivityAgentSpec) {
|
||||
*out = *in
|
||||
if in.Tolerations != nil {
|
||||
in, out := &in.Tolerations, &out.Tolerations
|
||||
*out = make([]corev1.Toleration, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.ExtraArgs != nil {
|
||||
in, out := &in.ExtraArgs, &out.ExtraArgs
|
||||
*out = make(ExtraArgs, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Replicas != nil {
|
||||
in, out := &in.Replicas, &out.Replicas
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KonnectivityAgentSpec.
|
||||
@@ -737,6 +975,22 @@ func (in *KonnectivityAgentSpec) DeepCopy() *KonnectivityAgentSpec {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KonnectivityAgentStatus) DeepCopyInto(out *KonnectivityAgentStatus) {
|
||||
*out = *in
|
||||
in.ExternalKubernetesObjectStatus.DeepCopyInto(&out.ExternalKubernetesObjectStatus)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KonnectivityAgentStatus.
|
||||
func (in *KonnectivityAgentStatus) DeepCopy() *KonnectivityAgentStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(KonnectivityAgentStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KonnectivityConfigMap) DeepCopyInto(out *KonnectivityConfigMap) {
|
||||
*out = *in
|
||||
@@ -757,7 +1011,7 @@ func (in *KonnectivityServerSpec) DeepCopyInto(out *KonnectivityServerSpec) {
|
||||
*out = *in
|
||||
if in.Resources != nil {
|
||||
in, out := &in.Resources, &out.Resources
|
||||
*out = new(ComponentResourceRequirements)
|
||||
*out = new(corev1.ResourceRequirements)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ExtraArgs != nil {
|
||||
@@ -804,6 +1058,11 @@ func (in *KonnectivityStatus) DeepCopyInto(out *KonnectivityStatus) {
|
||||
in.ClusterRoleBinding.DeepCopyInto(&out.ClusterRoleBinding)
|
||||
in.Agent.DeepCopyInto(&out.Agent)
|
||||
in.Service.DeepCopyInto(&out.Service)
|
||||
if in.Gateway != nil {
|
||||
in, out := &in.Gateway, &out.Gateway
|
||||
*out = new(KubernetesGatewayStatus)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KonnectivityStatus.
|
||||
@@ -864,6 +1123,123 @@ func (in *KubeadmPhasesStatus) DeepCopy() *KubeadmPhasesStatus {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubeconfigGenerator) DeepCopyInto(out *KubeconfigGenerator) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigGenerator.
|
||||
func (in *KubeconfigGenerator) DeepCopy() *KubeconfigGenerator {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(KubeconfigGenerator)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *KubeconfigGenerator) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubeconfigGeneratorList) DeepCopyInto(out *KubeconfigGeneratorList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]KubeconfigGenerator, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigGeneratorList.
|
||||
func (in *KubeconfigGeneratorList) DeepCopy() *KubeconfigGeneratorList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(KubeconfigGeneratorList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *KubeconfigGeneratorList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubeconfigGeneratorSpec) DeepCopyInto(out *KubeconfigGeneratorSpec) {
|
||||
*out = *in
|
||||
in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector)
|
||||
in.TenantControlPlaneSelector.DeepCopyInto(&out.TenantControlPlaneSelector)
|
||||
if in.Groups != nil {
|
||||
in, out := &in.Groups, &out.Groups
|
||||
*out = make([]CompoundValue, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
out.User = in.User
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigGeneratorSpec.
|
||||
func (in *KubeconfigGeneratorSpec) DeepCopy() *KubeconfigGeneratorSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(KubeconfigGeneratorSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubeconfigGeneratorStatus) DeepCopyInto(out *KubeconfigGeneratorStatus) {
|
||||
*out = *in
|
||||
if in.Errors != nil {
|
||||
in, out := &in.Errors, &out.Errors
|
||||
*out = make([]KubeconfigGeneratorStatusError, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigGeneratorStatus.
|
||||
func (in *KubeconfigGeneratorStatus) DeepCopy() *KubeconfigGeneratorStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(KubeconfigGeneratorStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubeconfigGeneratorStatusError) DeepCopyInto(out *KubeconfigGeneratorStatusError) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeconfigGeneratorStatusError.
|
||||
func (in *KubeconfigGeneratorStatusError) DeepCopy() *KubeconfigGeneratorStatusError {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(KubeconfigGeneratorStatusError)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubeconfigStatus) DeepCopyInto(out *KubeconfigStatus) {
|
||||
*out = *in
|
||||
@@ -901,6 +1277,18 @@ func (in *KubeconfigsStatus) DeepCopy() *KubeconfigsStatus {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubeletSpec) DeepCopyInto(out *KubeletSpec) {
|
||||
*out = *in
|
||||
if in.ConfigurationJSONPatches != nil {
|
||||
in, out := &in.ConfigurationJSONPatches, &out.ConfigurationJSONPatches
|
||||
*out = make(JSONPatches, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.PreferredAddressTypes != nil {
|
||||
in, out := &in.PreferredAddressTypes, &out.PreferredAddressTypes
|
||||
*out = make([]KubeletPreferredAddressType, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletSpec.
|
||||
@@ -930,6 +1318,30 @@ func (in *KubernetesDeploymentStatus) DeepCopy() *KubernetesDeploymentStatus {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubernetesGatewayStatus) DeepCopyInto(out *KubernetesGatewayStatus) {
|
||||
*out = *in
|
||||
in.RouteStatus.DeepCopyInto(&out.RouteStatus)
|
||||
out.RouteRef = in.RouteRef
|
||||
if in.AccessPoints != nil {
|
||||
in, out := &in.AccessPoints, &out.AccessPoints
|
||||
*out = make([]GatewayAccessPoint, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesGatewayStatus.
|
||||
func (in *KubernetesGatewayStatus) DeepCopy() *KubernetesGatewayStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(KubernetesGatewayStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubernetesIngressStatus) DeepCopyInto(out *KubernetesIngressStatus) {
|
||||
*out = *in
|
||||
@@ -965,7 +1377,7 @@ func (in *KubernetesServiceStatus) DeepCopy() *KubernetesServiceStatus {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *KubernetesSpec) DeepCopyInto(out *KubernetesSpec) {
|
||||
*out = *in
|
||||
out.Kubelet = in.Kubelet
|
||||
in.Kubelet.DeepCopyInto(&out.Kubelet)
|
||||
if in.AdmissionControllers != nil {
|
||||
in, out := &in.AdmissionControllers, &out.AdmissionControllers
|
||||
*out = make(AdmissionControllers, len(*in))
|
||||
@@ -994,6 +1406,11 @@ func (in *KubernetesStatus) DeepCopyInto(out *KubernetesStatus) {
|
||||
*out = new(KubernetesIngressStatus)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Gateway != nil {
|
||||
in, out := &in.Gateway, &out.Gateway
|
||||
*out = new(KubernetesGatewayStatus)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesStatus.
|
||||
@@ -1029,6 +1446,16 @@ func (in *KubernetesVersion) DeepCopy() *KubernetesVersion {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NetworkProfileSpec) DeepCopyInto(out *NetworkProfileSpec) {
|
||||
*out = *in
|
||||
if in.LoadBalancerSourceRanges != nil {
|
||||
in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.LoadBalancerClass != nil {
|
||||
in, out := &in.LoadBalancerClass, &out.LoadBalancerClass
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.CertSANs != nil {
|
||||
in, out := &in.CertSANs, &out.CertSANs
|
||||
*out = make([]string, len(*in))
|
||||
@@ -1051,6 +1478,21 @@ func (in *NetworkProfileSpec) DeepCopy() *NetworkProfileSpec {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Permissions) DeepCopyInto(out *Permissions) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Permissions.
|
||||
func (in *Permissions) DeepCopy() *Permissions {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Permissions)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PublicKeyPrivateKeyPairStatus) DeepCopyInto(out *PublicKeyPrivateKeyPairStatus) {
|
||||
*out = *in
|
||||
@@ -1067,6 +1509,21 @@ func (in *PublicKeyPrivateKeyPairStatus) DeepCopy() *PublicKeyPrivateKeyPairStat
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RegistrySettings) DeepCopyInto(out *RegistrySettings) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrySettings.
|
||||
func (in *RegistrySettings) DeepCopy() *RegistrySettings {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(RegistrySettings)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
|
||||
*out = *in
|
||||
@@ -1087,6 +1544,13 @@ func (in *SecretReference) DeepCopy() *SecretReference {
|
||||
func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
|
||||
*out = *in
|
||||
in.AdditionalMetadata.DeepCopyInto(&out.AdditionalMetadata)
|
||||
if in.AdditionalPorts != nil {
|
||||
in, out := &in.AdditionalPorts, &out.AdditionalPorts
|
||||
*out = make([]AdditionalPort, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec.
|
||||
@@ -1121,7 +1585,11 @@ func (in *StorageStatus) DeepCopy() *StorageStatus {
|
||||
func (in *TLSConfig) DeepCopyInto(out *TLSConfig) {
|
||||
*out = *in
|
||||
in.CertificateAuthority.DeepCopyInto(&out.CertificateAuthority)
|
||||
in.ClientCertificate.DeepCopyInto(&out.ClientCertificate)
|
||||
if in.ClientCertificate != nil {
|
||||
in, out := &in.ClientCertificate, &out.ClientCertificate
|
||||
*out = new(ClientCertificate)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig.
|
||||
@@ -1196,6 +1664,12 @@ func (in *TenantControlPlaneList) DeepCopyObject() runtime.Object {
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TenantControlPlaneSpec) DeepCopyInto(out *TenantControlPlaneSpec) {
|
||||
*out = *in
|
||||
out.WritePermissions = in.WritePermissions
|
||||
if in.DataStoreOverrides != nil {
|
||||
in, out := &in.DataStoreOverrides, &out.DataStoreOverrides
|
||||
*out = make([]DataStoreOverride, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
in.ControlPlane.DeepCopyInto(&out.ControlPlane)
|
||||
in.Kubernetes.DeepCopyInto(&out.Kubernetes)
|
||||
in.NetworkProfile.DeepCopyInto(&out.NetworkProfile)
|
||||
@@ -1233,3 +1707,18 @@ func (in *TenantControlPlaneStatus) DeepCopy() *TenantControlPlaneStatus {
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TenantControlPlaneStatusDataStore) DeepCopyInto(out *TenantControlPlaneStatusDataStore) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantControlPlaneStatusDataStore.
|
||||
func (in *TenantControlPlaneStatusDataStore) DeepCopy() *TenantControlPlaneStatusDataStore {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(TenantControlPlaneStatusDataStore)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
|
Before Width: | Height: | Size: 12 KiB |
@@ -1 +0,0 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" role="img" viewBox="11.85 8.10 202.80 187.55"><title>Kamaji</title> … </svg> (inline path data of the removed Kamaji logo SVG omitted)
|
||||
|
Before Width: | Height: | Size: 3.6 KiB |
BIN
assets/logo-black.png
Normal file
|
After Width: | Height: | Size: 8.7 KiB |
BIN
assets/logo-colored.png
Normal file
|
After Width: | Height: | Size: 27 KiB |
BIN
assets/logo-white.png
Normal file
|
After Width: | Height: | Size: 11 KiB |
1
assets/logo.svg
Normal file
|
After Width: | Height: | Size: 5.1 KiB |
|
Before Width: | Height: | Size: 119 KiB |
28
charts/kamaji-crds/.helmignore
Normal file
@@ -0,0 +1,28 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
# Helm source files
|
||||
README.md.gotmpl
|
||||
.helmignore
|
||||
# Build tools
|
||||
Makefile
|
||||
39
charts/kamaji-crds/Chart.yaml
Normal file
@@ -0,0 +1,39 @@
|
||||
apiVersion: v2
|
||||
appVersion: latest
|
||||
description: Kamaji is the Hosted Control Plane Manager for Kubernetes.
|
||||
home: https://github.com/clastix/kamaji
|
||||
icon: https://github.com/clastix/kamaji/raw/master/assets/logo-colored.png
|
||||
maintainers:
|
||||
- email: dario@tranchitella.eu
|
||||
name: Dario Tranchitella
|
||||
url: https://clastix.io
|
||||
- email: me@bsctl.io
|
||||
name: Adriano Pezzuto
|
||||
url: https://clastix.io
|
||||
name: kamaji-crds
|
||||
sources:
|
||||
- https://github.com/clastix/kamaji
|
||||
type: application
|
||||
version: 0.0.0+latest
|
||||
annotations:
|
||||
artifacthub.io/crds: |
|
||||
- kind: TenantControlPlane
|
||||
version: v1alpha1
|
||||
name: tenantcontrolplanes.kamaji.clastix.io
|
||||
displayName: TenantControlPlane
|
||||
description: TenantControlPlane defines the desired state for a Control Plane backed by Kamaji.
|
||||
- kind: DataStore
|
||||
version: v1alpha1
|
||||
name: datastores.kamaji.clastix.io
|
||||
displayName: DataStore
|
||||
description: DataStore holds all the required details to communicate with a datastore, such as etcd, MySQL, PostgreSQL, and NATS.
|
||||
artifacthub.io/links: |
|
||||
- name: CLASTIX
|
||||
url: https://clastix.io
|
||||
- name: support
|
||||
url: https://clastix.io/support
|
||||
artifacthub.io/changes: |
|
||||
- kind: changed
|
||||
description: Upgrading support to Kubernetes v1.35
|
||||
- kind: added
|
||||
description: Supporting multiple Datastore via etcd overrides
|
||||
9
charts/kamaji-crds/Makefile
Normal file
@@ -0,0 +1,9 @@
|
||||
docs: HELMDOCS_VERSION := v1.8.1
|
||||
docs: docker
|
||||
@docker run --rm -v "$$(pwd):/helm-docs" -u $$(id -u) jnorwood/helm-docs:$(HELMDOCS_VERSION)
|
||||
|
||||
docker:
|
||||
@hash docker 2>/dev/null || {\
|
||||
echo "You need docker" &&\
|
||||
exit 1;\
|
||||
}
|
||||
2
charts/kamaji-crds/NOTES.txt
Normal file
@@ -0,0 +1,2 @@
|
||||
Kamaji Custom Resource Definitions have been installed properly:
|
||||
you can proceed to upgrade your Kamaji operator instance.
|
||||
66
charts/kamaji-crds/README.md
Normal file
@@ -0,0 +1,66 @@
|
||||
# kamaji-crds
|
||||
|
||||
  
|
||||
|
||||
Kamaji is the Hosted Control Plane Manager for Kubernetes.
|
||||
|
||||
## Maintainers
|
||||
|
||||
| Name | Email | Url |
|
||||
| ---- | ------ | --- |
|
||||
| Dario Tranchitella | <dario@tranchitella.eu> | <https://clastix.io> |
|
||||
| Adriano Pezzuto | <me@bsctl.io> | <https://clastix.io> |
|
||||
|
||||
## Source Code
|
||||
|
||||
* <https://github.com/clastix/kamaji>
|
||||
|
||||
[Kamaji](https://github.com/clastix/kamaji) Custom Resource Definitions packaged as Helm Charts.
|
||||
|
||||
## How to use this chart
|
||||
|
||||
Add `clastix` Helm repository:
|
||||
|
||||
helm repo add clastix https://clastix.github.io/charts
|
||||
|
||||
Install the Chart with the release name `kamaji-crds`:
|
||||
|
||||
helm upgrade --install --namespace kamaji-system --create-namespace kamaji-crds clastix/kamaji-crds
|
||||
|
||||
Show the status:
|
||||
|
||||
helm status kamaji-crds -n kamaji-system
|
||||
|
||||
Upgrade the Chart
|
||||
|
||||
helm upgrade kamaji-crds -n kamaji-system clastix/kamaji-crds
|
||||
|
||||
Uninstall the Chart
|
||||
|
||||
helm uninstall kamaji-crds -n kamaji-system
|
||||
|
||||
## Customize the installation
|
||||
|
||||
There are two methods for specifying overrides of values during Chart installation: `--values` and `--set`.
|
||||
|
||||
The `--values` option is the preferred method because it allows you to keep your overrides in a YAML file, rather than specifying them all on the command line. Create a copy of the YAML file `values.yaml` and add your overrides to it.
|
||||
|
||||
Specify your overrides file when you install the Chart:
|
||||
|
||||
helm upgrade kamaji-crds --install --namespace kamaji-system --create-namespace clastix/kamaji-crds --values myvalues.yaml
|
||||
|
||||
The values in your overrides file `myvalues.yaml` will override their counterparts in the Chart's values.yaml file. Any values in `values.yaml` that weren’t overridden will keep their defaults.
|
||||
|
||||
If you only need to make minor customizations, you can specify them on the command line by using the `--set` option. For example:
|
||||
|
||||
helm upgrade kamaji-crds --install --namespace kamaji-system --create-namespace clastix/kamaji-crds --set kamajiCertificateName=kamaji
|
||||
|
||||
## Values
|
||||
|
||||
| Key | Type | Default | Description |
|
||||
|-----|------|---------|-------------|
|
||||
| fullnameOverride | string | `""` | Overrides the full name of the resources created by the chart. |
|
||||
| kamajiCertificateName | string | `"kamaji-serving-cert"` | The cert-manager Certificate resource name, holding the Certificate Authority for webhooks. |
|
||||
| kamajiNamespace | string | `"kamaji-system"` | The namespace where Kamaji has been installed: required to inject the Certificate Authority for cert-manager. |
|
||||
| kamajiService | string | `"kamaji-webhook-service"` | The Kamaji webhook Service name. |
|
||||
| nameOverride | string | `""` | Overrides the name of the chart for resource naming purposes. |
|
||||
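As a concrete illustration of the `--values` approach, a small overrides file covering the keys listed above could look like the following sketch (the values shown are placeholders, mirroring the chart defaults and the `--set` example earlier; adjust them to your own Kamaji installation):

    # myvalues.yaml -- example overrides for kamaji-crds (placeholder values)
    kamajiNamespace: kamaji-system          # namespace where the Kamaji operator runs
    kamajiService: kamaji-webhook-service   # Kamaji webhook Service name
    kamajiCertificateName: kamaji           # cert-manager Certificate holding the webhook CA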
54
charts/kamaji-crds/README.md.gotmpl
Normal file
@@ -0,0 +1,54 @@
|
||||
{{ template "chart.header" . }}
|
||||
{{ template "chart.deprecationWarning" . }}
|
||||
|
||||
{{ template "chart.badgesSection" . }}
|
||||
|
||||
{{ template "chart.description" . }}
|
||||
|
||||
{{ template "chart.maintainersSection" . }}
|
||||
|
||||
{{ template "chart.sourcesSection" . }}
|
||||
|
||||
{{ template "chart.requirementsSection" . }}
|
||||
|
||||
[Kamaji](https://github.com/clastix/kamaji) Custom Resource Definitions packaged as Helm Charts.
|
||||
|
||||
## How to use this chart
|
||||
|
||||
Add `clastix` Helm repository:
|
||||
|
||||
helm repo add clastix https://clastix.github.io/charts
|
||||
|
||||
Install the Chart with the release name `kamaji-crds`:
|
||||
|
||||
helm upgrade --install --namespace kamaji-system --create-namespace kamaji-crds clastix/kamaji-crds
|
||||
|
||||
Show the status:
|
||||
|
||||
helm status kamaji-crds -n kamaji-system
|
||||
|
||||
Upgrade the Chart
|
||||
|
||||
helm upgrade kamaji-crds -n kamaji-system clastix/kamaji-crds
|
||||
|
||||
Uninstall the Chart
|
||||
|
||||
helm uninstall kamaji-crds -n kamaji-system
|
||||
|
||||
## Customize the installation
|
||||
|
||||
There are two methods for specifying overrides of values during Chart installation: `--values` and `--set`.
|
||||
|
||||
The `--values` option is the preferred method because it allows you to keep your overrides in a YAML file, rather than specifying them all on the command line. Create a copy of the YAML file `values.yaml` and add your overrides to it.
|
||||
|
||||
Specify your overrides file when you install the Chart:
|
||||
|
||||
helm upgrade kamaji-crds --install --namespace kamaji-system --create-namespace clastix/kamaji-crds --values myvalues.yaml
|
||||
|
||||
The values in your overrides file `myvalues.yaml` will override their counterparts in the Chart's values.yaml file. Any values in `values.yaml` that weren’t overridden will keep their defaults.
|
||||
|
||||
If you only need to make minor customizations, you can specify them on the command line by using the `--set` option. For example:
|
||||
|
||||
helm upgrade kamaji-crds --install --namespace kamaji-system --create-namespace clastix/kamaji-crds --set kamajiCertificateName=kamaji
|
||||
|
||||
{{ template "chart.valuesSection" . }}
|
||||
11
charts/kamaji-crds/hack/crd-conversion.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
spec:
|
||||
conversion:
|
||||
strategy: Webhook
|
||||
webhook:
|
||||
clientConfig:
|
||||
service:
|
||||
name: kamaji-webhook-service
|
||||
namespace: kamaji-system
|
||||
path: /convert
|
||||
conversionReviewVersions:
|
||||
- v1
|
||||
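The fragment above is only a `spec.conversion` stanza; presumably the chart merges it into each packaged CRD at templating time. For orientation, a hedged sketch of how it would appear inside a full CustomResourceDefinition object (other mandatory CRD fields such as group, names, and versions omitted for brevity):

    apiVersion: apiextensions.k8s.io/v1
    kind: CustomResourceDefinition
    metadata:
      name: tenantcontrolplanes.kamaji.clastix.io   # or datastores.kamaji.clastix.io
    spec:
      conversion:
        strategy: Webhook
        webhook:
          clientConfig:
            service:
              name: kamaji-webhook-service
              namespace: kamaji-system
              path: /convert
          conversionReviewVersions:
            - v1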
292
charts/kamaji-crds/hack/kamaji.clastix.io_datastores_spec.yaml
Normal file
@@ -0,0 +1,292 @@
|
||||
group: kamaji.clastix.io
|
||||
names:
|
||||
kind: DataStore
|
||||
listKind: DataStoreList
|
||||
plural: datastores
|
||||
singular: datastore
|
||||
scope: Cluster
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- description: Kamaji data store driver
|
||||
jsonPath: .spec.driver
|
||||
name: Driver
|
||||
type: string
|
||||
- description: Age
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: DataStore is the Schema for the datastores API.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: DataStoreSpec defines the desired state of DataStore.
|
||||
properties:
|
||||
basicAuth:
|
||||
description: |-
|
||||
If authentication is enabled for the given data store, specifies the username and password pair.
|
||||
This value is optional.
|
||||
properties:
|
||||
password:
|
||||
properties:
|
||||
content:
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
description: name is unique within a namespace to reference a secret resource.
|
||||
type: string
|
||||
namespace:
|
||||
description: namespace defines the space within which the secret name must be unique.
|
||||
type: string
|
||||
required:
|
||||
- keyPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
type: object
|
||||
username:
|
||||
properties:
|
||||
content:
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
description: name is unique within a namespace to reference a secret resource.
|
||||
type: string
|
||||
namespace:
|
||||
description: namespace defines the space within which the secret name must be unique.
|
||||
type: string
|
||||
required:
|
||||
- keyPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
type: object
|
||||
required:
|
||||
- password
|
||||
- username
|
||||
type: object
|
||||
driver:
|
||||
description: The driver to use to connect to the shared datastore.
|
||||
enum:
|
||||
- etcd
|
||||
- MySQL
|
||||
- PostgreSQL
|
||||
- NATS
|
||||
type: string
|
||||
x-kubernetes-validations:
|
||||
- message: Datastore driver is immutable
|
||||
rule: self == oldSelf
|
||||
endpoints:
|
||||
description: |-
|
||||
List of the endpoints to connect to the shared datastore.
|
||||
No need for protocol, just bare IP/FQDN and port.
|
||||
items:
|
||||
type: string
|
||||
minItems: 1
|
||||
type: array
|
||||
tlsConfig:
|
||||
description: |-
|
||||
Defines the TLS/SSL configuration required to connect to the data store in a secure way.
|
||||
This value is optional.
|
||||
properties:
|
||||
certificateAuthority:
|
||||
description: |-
|
||||
Retrieve the Certificate Authority certificate and private key, such as bare content of the file, or a SecretReference.
|
||||
The key reference is required since etcd authentication is based on certificates, and Kamaji is responsible for creating this.
|
||||
properties:
|
||||
certificate:
|
||||
properties:
|
||||
content:
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
description: name is unique within a namespace to reference a secret resource.
|
||||
type: string
|
||||
namespace:
|
||||
description: namespace defines the space within which the secret name must be unique.
|
||||
type: string
|
||||
required:
|
||||
- keyPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
type: object
|
||||
privateKey:
|
||||
properties:
|
||||
content:
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
description: name is unique within a namespace to reference a secret resource.
|
||||
type: string
|
||||
namespace:
|
||||
description: namespace defines the space within which the secret name must be unique.
|
||||
type: string
|
||||
required:
|
||||
- keyPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
type: object
|
||||
required:
|
||||
- certificate
|
||||
type: object
|
||||
clientCertificate:
|
||||
description: Specifies the SSL/TLS certificate and private key pair used to connect to the data store.
|
||||
properties:
|
||||
certificate:
|
||||
properties:
|
||||
content:
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
description: name is unique within a namespace to reference a secret resource.
|
||||
type: string
|
||||
namespace:
|
||||
description: namespace defines the space within which the secret name must be unique.
|
||||
type: string
|
||||
required:
|
||||
- keyPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
type: object
|
||||
privateKey:
|
||||
properties:
|
||||
content:
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
description: name is unique within a namespace to reference a secret resource.
|
||||
type: string
|
||||
namespace:
|
||||
description: namespace defines the space within which the secret name must be unique.
|
||||
type: string
|
||||
required:
|
||||
- keyPath
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
type: object
|
||||
required:
|
||||
- certificate
|
||||
- privateKey
|
||||
type: object
|
||||
required:
|
||||
- certificateAuthority
|
||||
type: object
|
||||
required:
|
||||
- driver
|
||||
- endpoints
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: certificateAuthority privateKey must have secretReference or content when driver is etcd
|
||||
rule: '(self.driver == "etcd") ? (self.tlsConfig != null && (has(self.tlsConfig.certificateAuthority.privateKey.secretReference) || has(self.tlsConfig.certificateAuthority.privateKey.content))) : true'
|
||||
- message: clientCertificate must have secretReference or content when driver is etcd
|
||||
rule: '(self.driver == "etcd") ? (self.tlsConfig != null && (has(self.tlsConfig.clientCertificate.certificate.secretReference) || has(self.tlsConfig.clientCertificate.certificate.content))) : true'
|
||||
- message: clientCertificate privateKey must have secretReference or content when driver is etcd
|
||||
rule: '(self.driver == "etcd") ? (self.tlsConfig != null && (has(self.tlsConfig.clientCertificate.privateKey.secretReference) || has(self.tlsConfig.clientCertificate.privateKey.content))) : true'
|
||||
- message: When driver is not etcd and tlsConfig exists, clientCertificate must be null or contain valid content
|
||||
rule: '(self.driver != "etcd" && has(self.tlsConfig) && has(self.tlsConfig.clientCertificate)) ? (((has(self.tlsConfig.clientCertificate.certificate.secretReference) || has(self.tlsConfig.clientCertificate.certificate.content)))) : true'
|
||||
- message: When driver is not etcd and basicAuth exists, username must have secretReference or content
|
||||
rule: '(self.driver != "etcd" && has(self.basicAuth)) ? ((has(self.basicAuth.username.secretReference) || has(self.basicAuth.username.content))) : true'
|
||||
- message: When driver is not etcd and basicAuth exists, password must have secretReference or content
|
||||
rule: '(self.driver != "etcd" && has(self.basicAuth)) ? ((has(self.basicAuth.password.secretReference) || has(self.basicAuth.password.content))) : true'
|
||||
- message: When driver is not etcd, either tlsConfig or basicAuth must be provided
|
||||
rule: '(self.driver != "etcd") ? (has(self.tlsConfig) || has(self.basicAuth)) : true'
|
||||
status:
|
||||
description: DataStoreStatus defines the observed state of DataStore.
|
||||
properties:
|
||||
observedGeneration:
|
||||
description: ObservedGeneration represents the .metadata.generation that was last reconciled.
|
||||
format: int64
|
||||
type: integer
|
||||
usedBy:
|
||||
description: List of the Tenant Control Planes, as namespaced names, using this data store.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
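To make the schema above concrete, here is a minimal, hypothetical DataStore manifest that satisfies the validation rules for a non-etcd driver (the endpoint, namespace, and Secret names are illustrative only):

    apiVersion: kamaji.clastix.io/v1alpha1
    kind: DataStore
    metadata:
      name: postgresql-example            # hypothetical name
    spec:
      driver: PostgreSQL
      endpoints:
        - postgres-rw.db.svc:5432         # bare FQDN and port, no protocol
      basicAuth:
        username:
          secretReference:
            name: postgres-credentials    # hypothetical Secret holding the credentials
            namespace: db
            keyPath: username
        password:
          secretReference:
            name: postgres-credentials
            namespace: db
            keyPath: password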
@@ -0,0 +1,218 @@
|
||||
group: kamaji.clastix.io
|
||||
names:
|
||||
categories:
|
||||
- kamaji
|
||||
kind: KubeconfigGenerator
|
||||
listKind: KubeconfigGeneratorList
|
||||
plural: kubeconfiggenerators
|
||||
shortNames:
|
||||
- kc
|
||||
singular: kubeconfiggenerator
|
||||
scope: Cluster
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- description: Age
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: KubeconfigGenerator is the Schema for the kubeconfiggenerators API.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
properties:
|
||||
controlPlaneEndpointFrom:
|
||||
default: admin.svc
|
||||
description: |-
|
||||
ControlPlaneEndpointFrom is the key used to extract the Tenant Control Plane endpoint that must be used by the generator.
|
||||
The targeted Secret is the `${TCP}-admin-kubeconfig` one, defaulting to `admin.svc`.
|
||||
type: string
|
||||
groups:
|
||||
description: |-
|
||||
Groups resolves to a set of strings used to assign the x509 organisations field.
|
||||
It will be recognised by Kubernetes as user groups.
|
||||
items:
|
||||
description: |-
|
||||
CompoundValue allows defining a static, or a dynamic value.
|
||||
Options are mutually exclusive, just one should be picked up.
|
||||
properties:
|
||||
fromDefinition:
|
||||
description: |-
|
||||
FromDefinition is used to generate a dynamic value,
|
||||
it uses the dot notation to access fields from the referenced TenantControlPlane object:
|
||||
e.g.: metadata.name
|
||||
type: string
|
||||
stringValue:
|
||||
description: StringValue is a static string value.
|
||||
type: string
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: Either stringValue or fromDefinition must be set, but not both.
|
||||
rule: (has(self.stringValue) || has(self.fromDefinition)) && !(has(self.stringValue) && has(self.fromDefinition))
|
||||
type: array
|
||||
namespaceSelector:
|
||||
description: NamespaceSelector is used to filter Namespaces from which the generator should extract TenantControlPlane objects.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
|
||||
items:
|
||||
description: |-
|
||||
A label selector requirement is a selector that contains values, a key, and an operator that
|
||||
relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the selector applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: |-
|
||||
operator represents a key's relationship to a set of values.
|
||||
Valid operators are In, NotIn, Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
values is an array of string values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array is replaced during a strategic
|
||||
merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions, whose key field is "key", the
|
||||
operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
tenantControlPlaneSelector:
|
||||
description: TenantControlPlaneSelector is used to filter the TenantControlPlane objects that should be addressed by the generator.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
|
||||
items:
|
||||
description: |-
|
||||
A label selector requirement is a selector that contains values, a key, and an operator that
|
||||
relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the selector applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: |-
|
||||
operator represents a key's relationship to a set of values.
|
||||
Valid operators are In, NotIn, Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
values is an array of string values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array is replaced during a strategic
|
||||
merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions, whose key field is "key", the
|
||||
operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
user:
|
||||
description: User resolves to a string to identify the client, assigned to the x509 Common Name field.
|
||||
properties:
|
||||
fromDefinition:
|
||||
description: |-
|
||||
FromDefinition is used to generate a dynamic value,
|
||||
it uses the dot notation to access fields from the referenced TenantControlPlane object:
|
||||
e.g.: metadata.name
|
||||
type: string
|
||||
stringValue:
|
||||
description: StringValue is a static string value.
|
||||
type: string
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: Either stringValue or fromDefinition must be set, but not both.
|
||||
rule: (has(self.stringValue) || has(self.fromDefinition)) && !(has(self.stringValue) && has(self.fromDefinition))
|
||||
required:
|
||||
- user
|
||||
type: object
|
||||
status:
|
||||
description: KubeconfigGeneratorStatus defines the observed state of KubeconfigGenerator.
|
||||
properties:
|
||||
availableResources:
|
||||
default: 0
|
||||
description: |-
|
||||
AvailableResources is the sum of successfully generated resources.
|
||||
If this value differs from Resources, check the errors field.
|
||||
type: integer
|
||||
errors:
|
||||
description: Errors is the list of failed kubeconfig generations.
|
||||
items:
|
||||
properties:
|
||||
message:
|
||||
description: Message is the error message recorded upon the last generator run.
|
||||
type: string
|
||||
resource:
|
||||
description: Resource is the Namespaced name of the errored resource.
|
||||
type: string
|
||||
required:
|
||||
- message
|
||||
- resource
|
||||
type: object
|
||||
type: array
|
||||
observedGeneration:
|
||||
description: ObservedGeneration represents the .metadata.generation that was last reconciled.
|
||||
format: int64
|
||||
type: integer
|
||||
resources:
|
||||
default: 0
|
||||
description: Resources is the sum of targeted TenantControlPlane objects.
|
||||
type: integer
|
||||
required:
|
||||
- availableResources
|
||||
- resources
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
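For orientation when reading the schema above, here is a minimal sketch of a KubeconfigGenerator manifest. It is not part of this changeset: the object name, the `tenant-admins` group, and the `environment` label are hypothetical placeholders, while the field names and the `admin.svc` default come from the spec itself.

    apiVersion: kamaji.clastix.io/v1alpha1
    kind: KubeconfigGenerator
    metadata:
      name: tenant-admin-kubeconfigs          # hypothetical name (cluster-scoped resource)
    spec:
      controlPlaneEndpointFrom: admin.svc     # key in the ${TCP}-admin-kubeconfig Secret (default)
      user:
        fromDefinition: metadata.name         # x509 Common Name resolved from each TenantControlPlane
      groups:
        - stringValue: tenant-admins          # hypothetical static x509 organisation
      tenantControlPlaneSelector:
        matchLabels:
          environment: production             # hypothetical label
      namespaceSelector:
        matchExpressions:
          - key: environment                  # hypothetical label key
            operator: In
            values:
              - production

Note that, per the CEL rule above, each CompoundValue sets exactly one of `stringValue` or `fromDefinition`.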
49
charts/kamaji-crds/templates/_helpers.tpl
Normal file
@@ -0,0 +1,49 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "kamaji-crds.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "kamaji.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "kamaji-crds.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the cert-manager annotation to inject Certificate CA.
|
||||
*/}}
|
||||
{{- define "kamaji-crds.certManagerAnnotation" -}}
|
||||
{{- printf "%s/%s" (required "A valid .Values.kamajiNamespace is required" .Values.kamajiNamespace) (required "A valid .Values.kamajiCertificateName is required" .Values.kamajiCertificateName) }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "kamaji-crds.labels" -}}
|
||||
helm.sh/chart: {{ include "kamaji-crds.chart" . }}
|
||||
app.kubernetes.io/name: {{ include "kamaji-crds.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/component: "crds"
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
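For reference, with the default values declared later in this diff (`kamajiNamespace: kamaji-system`, `kamajiCertificateName: kamaji-serving-cert`), the `kamaji-crds.certManagerAnnotation` helper would render the same annotation already carried by the existing CRDs:

    cert-manager.io/inject-ca-from: kamaji-system/kamaji-serving-cert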
@@ -0,0 +1,10 @@
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
cert-manager.io/inject-ca-from: {{ include "kamaji-crds.certManagerAnnotation" . }}
|
||||
labels:
|
||||
{{- include "kamaji-crds.labels" . | nindent 4 }}
|
||||
name: datastores.kamaji.clastix.io
|
||||
spec:
|
||||
{{ tpl (.Files.Get "hack/kamaji.clastix.io_datastores_spec.yaml") . | nindent 2}}
|
||||
@@ -0,0 +1,10 @@
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
cert-manager.io/inject-ca-from: {{ include "kamaji-crds.certManagerAnnotation" . }}
|
||||
labels:
|
||||
{{- include "kamaji-crds.labels" . | nindent 4 }}
|
||||
name: kubeconfiggenerators.kamaji.clastix.io
|
||||
spec:
|
||||
{{ tpl (.Files.Get "hack/kamaji.clastix.io_kubeconfiggenerators_spec.yaml") . | nindent 2 }}
|
||||
@@ -0,0 +1,10 @@
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
cert-manager.io/inject-ca-from: {{ include "kamaji-crds.certManagerAnnotation" . }}
|
||||
labels:
|
||||
{{- include "kamaji-crds.labels" . | nindent 4 }}
|
||||
name: tenantcontrolplanes.kamaji.clastix.io
|
||||
spec:
|
||||
{{ tpl (.Files.Get "hack/kamaji.clastix.io_tenantcontrolplanes_spec.yaml") . | nindent 2 }}
|
||||
15
charts/kamaji-crds/values.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
# Default values for kamaji-crds.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
# -- Overrides the name of the chart for resource naming purposes.
|
||||
nameOverride: ""
|
||||
# -- Overrides the full name of the resources created by the chart.
|
||||
fullnameOverride: ""
|
||||
|
||||
# -- The namespace where Kamaji has been installed: required to inject the Certificate Authority for cert-manager.
|
||||
kamajiNamespace: kamaji-system
|
||||
# -- The Kamaji webhook Service name.
|
||||
kamajiService: kamaji-webhook-service
|
||||
# -- The cert-manager Certificate resource name, holding the Certificate Authority for webhooks.
|
||||
kamajiCertificateName: kamaji-serving-cert
|
||||
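As a sketch of how these defaults would be overridden when Kamaji lives in a non-default namespace (the file name and the `my-kamaji` namespace below are placeholders, not part of the chart):

    # crds-values.yaml (hypothetical)
    kamajiNamespace: my-kamaji                 # namespace hosting the Kamaji cert-manager Certificate
    kamajiCertificateName: kamaji-serving-cert

It could then be applied with something like `helm upgrade --install kamaji-crds ./charts/kamaji-crds -f crds-values.yaml`, assuming the chart is installed directly from this directory.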
@@ -21,3 +21,8 @@
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
# Helm source files
|
||||
README.md.gotmpl
|
||||
.helmignore
|
||||
# Build tools
|
||||
Makefile
|
||||
|
||||
6
charts/kamaji/Chart.lock
Normal file
@@ -0,0 +1,6 @@
|
||||
dependencies:
|
||||
- name: kamaji-etcd
|
||||
repository: https://clastix.github.io/charts
|
||||
version: 0.11.0
|
||||
digest: sha256:96b4115b8c02f771f809ec1bed3be3a3903e7e8315d6966aa54b0f73230ea421
|
||||
generated: "2025-07-03T09:19:19.835421461+02:00"
|
||||
@@ -1,24 +1,50 @@
|
||||
apiVersion: v2
|
||||
appVersion: v0.2.2
|
||||
description: Kamaji is a tool aimed to build and operate a Managed Kubernetes Service
|
||||
with a fraction of the operational burden. With Kamaji, you can deploy and operate
|
||||
hundreds of Kubernetes clusters as a hyper-scaler.
|
||||
appVersion: latest
|
||||
description: Kamaji is the Hosted Control Plane Manager for Kubernetes.
|
||||
home: https://github.com/clastix/kamaji
|
||||
icon: https://github.com/clastix/kamaji/raw/master/assets/kamaji-logo.png
|
||||
icon: https://github.com/clastix/kamaji/raw/master/assets/logo-colored.png
|
||||
kubeVersion: ">=1.21.0-0"
|
||||
maintainers:
|
||||
- email: dario@tranchitella.eu
|
||||
name: Dario Tranchitella
|
||||
url: https://clastix.io
|
||||
- email: me@maxgio.it
|
||||
name: Massimiliano Giovagnoli
|
||||
- email: me@bsctl.io
|
||||
name: Adriano Pezzuto
|
||||
url: https://clastix.io
|
||||
name: kamaji
|
||||
sources:
|
||||
- https://github.com/clastix/kamaji
|
||||
type: application
|
||||
version: 0.11.4
|
||||
version: 0.0.0+latest
|
||||
dependencies:
|
||||
- name: kamaji-etcd
|
||||
repository: https://clastix.github.io/charts
|
||||
version: ">=0.11.0"
|
||||
condition: kamaji-etcd.deploy
|
||||
annotations:
|
||||
catalog.cattle.io/certified: partner
|
||||
catalog.cattle.io/release-name: kamaji
|
||||
catalog.cattle.io/display-name: Kamaji - Managed Kubernetes Service
|
||||
catalog.cattle.io/display-name: Kamaji
|
||||
artifacthub.io/crds: |
|
||||
- kind: TenantControlPlane
|
||||
version: v1alpha1
|
||||
name: tenantcontrolplanes.kamaji.clastix.io
|
||||
displayName: TenantControlPlane
|
||||
description: TenantControlPlane defines the desired state for a Control Plane backed by Kamaji.
|
||||
- kind: DataStore
|
||||
version: v1alpha1
|
||||
name: datastores.kamaji.clastix.io
|
||||
displayName: DataStore
|
||||
description: DataStore holds all the required details to communicate with a datastore, such as etcd, MySQL, PostgreSQL, and NATS.
|
||||
artifacthub.io/links: |
|
||||
- name: CLASTIX
|
||||
url: https://clastix.io
|
||||
- name: support
|
||||
url: https://clastix.io/support
|
||||
artifacthub.io/operator: "true"
|
||||
artifacthub.io/operatorCapabilities: "full lifecycle"
|
||||
artifacthub.io/changes: |
|
||||
- kind: added
|
||||
description: Releasing latest chart at every push
|
||||
|
||||
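Because the `kamaji-etcd` dependency is gated by the `kamaji-etcd.deploy` condition, the following is a hedged sketch of the values needed to skip the bundled etcd and point Kamaji at an externally managed datastore; the endpoint is a placeholder, and the TLS material would still have to be referenced through the `datastore.tlsConfig.*` keys listed in the values table further down.

    kamaji-etcd:
      deploy: false                            # do not render the kamaji-etcd sub-chart
    datastore:
      driver: etcd
      endpoints:
        - etcd.example.internal:2379           # hypothetical external endpoint, bare host:port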
@@ -1,16 +1,16 @@
|
||||
# kamaji
|
||||
|
||||
  
|
||||
  
|
||||
|
||||
Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden. With Kamaji, you can deploy and operate hundreds of Kubernetes clusters as a hyper-scaler.
|
||||
Kamaji is the Hosted Control Plane Manager for Kubernetes.
|
||||
|
||||
## Maintainers
|
||||
|
||||
| Name | Email | Url |
|
||||
| ---- | ------ | --- |
|
||||
| Dario Tranchitella | <dario@tranchitella.eu> | |
|
||||
| Dario Tranchitella | <dario@tranchitella.eu> | <https://clastix.io> |
|
||||
| Massimiliano Giovagnoli | <me@maxgio.it> | |
|
||||
| Adriano Pezzuto | <me@bsctl.io> | |
|
||||
| Adriano Pezzuto | <me@bsctl.io> | <https://clastix.io> |
|
||||
|
||||
## Source Code
|
||||
|
||||
@@ -20,6 +20,10 @@ Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a
|
||||
|
||||
Kubernetes: `>=1.21.0-0`
|
||||
|
||||
| Repository | Name | Version |
|
||||
|------------|------|---------|
|
||||
| https://clastix.github.io/charts | kamaji-etcd | >=0.11.0 |
|
||||
|
||||
[Kamaji](https://github.com/clastix/kamaji) requires a [multi-tenant `etcd`](https://github.com/clastix/kamaji-internal/blob/master/deploy/getting-started-with-kamaji.md#setup-internal-multi-tenant-etcd) cluster.
|
||||
Starting from v0.1.1, this Helm Chart provides the installation of an internal `etcd` to streamline local testing. If you'd like to use an externally managed etcd instance, you can specify the overrides and set the value `etcd.deploy=false`.
|
||||
|
||||
@@ -27,9 +31,13 @@ This Helm Chart starting from v0.1.1 provides the installation of an internal `e
|
||||
|
||||
## Install Kamaji
|
||||
|
||||
To add clastix helm repository:
|
||||
|
||||
helm repo add clastix https://clastix.github.io/charts
|
||||
|
||||
To install the Chart with the release name `kamaji`:
|
||||
|
||||
helm upgrade --install --namespace kamaji-system --create-namespace clastix/kamaji
|
||||
helm upgrade --install --namespace kamaji-system --create-namespace kamaji clastix/kamaji
|
||||
|
||||
Show the status:
|
||||
|
||||
@@ -66,44 +74,7 @@ Here the values you can override:
|
||||
| Key | Type | Default | Description |
|
||||
|-----|------|---------|-------------|
|
||||
| affinity | object | `{}` | Kubernetes affinity rules to apply to Kamaji controller pods |
|
||||
| datastore.basicAuth.passwordSecret.keyPath | string | `nil` | The Secret key where the data is stored. |
|
||||
| datastore.basicAuth.passwordSecret.name | string | `nil` | The name of the Secret containing the password used to connect to the relational database. |
|
||||
| datastore.basicAuth.passwordSecret.namespace | string | `nil` | The namespace of the Secret containing the password used to connect to the relational database. |
|
||||
| datastore.basicAuth.usernameSecret.keyPath | string | `nil` | The Secret key where the data is stored. |
|
||||
| datastore.basicAuth.usernameSecret.name | string | `nil` | The name of the Secret containing the username used to connect to the relational database. |
|
||||
| datastore.basicAuth.usernameSecret.namespace | string | `nil` | The namespace of the Secret containing the username used to connect to the relational database. |
|
||||
| datastore.driver | string | `"etcd"` | (string) The Kamaji Datastore driver, supported: etcd, MySQL, PostgreSQL (defaults=etcd). |
|
||||
| datastore.endpoints | list | `[]` | (array) List of endpoints of the selected Datastore. When letting the Chart install the etcd datastore, this field is populated automatically. |
|
||||
| datastore.nameOverride | string | `nil` | The Datastore name override, if empty defaults to `default` |
|
||||
| datastore.tlsConfig.certificateAuthority.certificate.keyPath | string | `nil` | Key of the Secret which contains the content of the certificate. |
|
||||
| datastore.tlsConfig.certificateAuthority.certificate.name | string | `nil` | Name of the Secret containing the CA required to establish the mandatory SSL/TLS connection to the datastore. |
|
||||
| datastore.tlsConfig.certificateAuthority.certificate.namespace | string | `nil` | Namespace of the Secret containing the CA required to establish the mandatory SSL/TLS connection to the datastore. |
|
||||
| datastore.tlsConfig.certificateAuthority.privateKey.keyPath | string | `nil` | Key of the Secret which contains the content of the private key. |
|
||||
| datastore.tlsConfig.certificateAuthority.privateKey.name | string | `nil` | Name of the Secret containing the CA private key required to establish the mandatory SSL/TLS connection to the datastore. |
|
||||
| datastore.tlsConfig.certificateAuthority.privateKey.namespace | string | `nil` | Namespace of the Secret containing the CA private key required to establish the mandatory SSL/TLS connection to the datastore. |
|
||||
| datastore.tlsConfig.clientCertificate.certificate.keyPath | string | `nil` | Key of the Secret which contains the content of the certificate. |
|
||||
| datastore.tlsConfig.clientCertificate.certificate.name | string | `nil` | Name of the Secret containing the client certificate required to establish the mandatory SSL/TLS connection to the datastore. |
|
||||
| datastore.tlsConfig.clientCertificate.certificate.namespace | string | `nil` | Namespace of the Secret containing the client certificate required to establish the mandatory SSL/TLS connection to the datastore. |
|
||||
| datastore.tlsConfig.clientCertificate.privateKey.keyPath | string | `nil` | Key of the Secret which contains the content of the private key. |
|
||||
| datastore.tlsConfig.clientCertificate.privateKey.name | string | `nil` | Name of the Secret containing the client certificate private key required to establish the mandatory SSL/TLS connection to the datastore. |
|
||||
| datastore.tlsConfig.clientCertificate.privateKey.namespace | string | `nil` | Namespace of the Secret containing the client certificate private key required to establish the mandatory SSL/TLS connection to the datastore. |
|
||||
| etcd.compactionInterval | int | `0` | ETCD Compaction interval (e.g. "5m0s"). (default: "0" (disabled)) |
|
||||
| etcd.deploy | bool | `true` | Install an etcd with enabled multi-tenancy along with Kamaji |
|
||||
| etcd.image | object | `{"pullPolicy":"IfNotPresent","repository":"quay.io/coreos/etcd","tag":"v3.5.6"}` | Install specific etcd image |
|
||||
| etcd.livenessProbe | object | `{"failureThreshold":8,"httpGet":{"path":"/health?serializable=true","port":2381,"scheme":"HTTP"},"initialDelaySeconds":10,"periodSeconds":10,"timeoutSeconds":15}` | The livenessProbe for the etcd container |
|
||||
| etcd.overrides.caSecret.name | string | `"etcd-certs"` | Name of the secret which contains CA's certificate and private key. (default: "etcd-certs") |
|
||||
| etcd.overrides.caSecret.namespace | string | `"kamaji-system"` | Namespace of the secret which contains CA's certificate and private key. (default: "kamaji-system") |
|
||||
| etcd.overrides.clientSecret.name | string | `"root-client-certs"` | Name of the secret which contains ETCD client certificates. (default: "root-client-certs") |
|
||||
| etcd.overrides.clientSecret.namespace | string | `"kamaji-system"` | Name of the namespace where the secret which contains ETCD client certificates is. (default: "kamaji-system") |
|
||||
| etcd.overrides.endpoints | object | `{"etcd-0":"etcd-0.etcd.kamaji-system.svc.cluster.local","etcd-1":"etcd-1.etcd.kamaji-system.svc.cluster.local","etcd-2":"etcd-2.etcd.kamaji-system.svc.cluster.local"}` | (map) Dictionary of the endpoints for the etcd cluster's members, where the key is the name of the etcd server. Don't define the protocol (TLS is inferred automatically) or any port, which is inferred from the .etcd.peerApiPort value. |
|
||||
| etcd.peerApiPort | int | `2380` | The peer API port which servers are listening to. |
|
||||
| etcd.persistence.accessModes[0] | string | `"ReadWriteOnce"` | |
|
||||
| etcd.persistence.customAnnotations | object | `{}` | The custom annotations to add to the PVC |
|
||||
| etcd.persistence.size | string | `"10Gi"` | |
|
||||
| etcd.persistence.storageClass | string | `""` | |
|
||||
| etcd.port | int | `2379` | The client request port. |
|
||||
| etcd.serviceAccount.create | bool | `true` | Create a ServiceAccount, required to install and provision the etcd backing storage (default: true) |
|
||||
| etcd.serviceAccount.name | string | `""` | Define the ServiceAccount name to use during the setup and provision of the etcd backing storage (default: "") |
|
||||
| defaultDatastoreName | string | `"default"` | If specified, all the Kamaji instances with an unassigned DataStore will inherit this default value. |
|
||||
| extraArgs | list | `[]` | A list of extra arguments to add to the kamaji controller default ones |
|
||||
| fullnameOverride | string | `""` | |
|
||||
| healthProbeBindAddress | string | `":8081"` | The address the probe endpoint binds to. (default ":8081") |
|
||||
@@ -111,9 +82,28 @@ Here the values you can override:
|
||||
| image.repository | string | `"clastix/kamaji"` | The container image of the Kamaji controller. |
|
||||
| image.tag | string | `nil` | Overrides the image tag whose default is the chart appVersion. |
|
||||
| imagePullSecrets | list | `[]` | |
|
||||
| kamaji-etcd | object | `{"clusterDomain":"cluster.local","datastore":{"enabled":true,"name":"default"},"deploy":true,"fullnameOverride":"kamaji-etcd"}` | Subchart: See https://github.com/clastix/kamaji-etcd/blob/master/charts/kamaji-etcd/values.yaml |
|
||||
| kubeconfigGenerator.affinity | object | `{}` | Kubernetes affinity rules to apply to Kubeconfig Generator controller pods |
|
||||
| kubeconfigGenerator.enableLeaderElect | bool | `true` | Enables the leader election. |
|
||||
| kubeconfigGenerator.enabled | bool | `false` | Toggle to deploy the Kubeconfig Generator Deployment. |
|
||||
| kubeconfigGenerator.extraArgs | list | `[]` | A list of extra arguments to add to the Kubeconfig Generator controller default ones. |
|
||||
| kubeconfigGenerator.fullnameOverride | string | `""` | |
|
||||
| kubeconfigGenerator.healthProbeBindAddress | string | `":8081"` | The address the probe endpoint binds to. |
|
||||
| kubeconfigGenerator.loggingDevel.enable | bool | `false` | Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) |
|
||||
| kubeconfigGenerator.nodeSelector | object | `{}` | Kubernetes node selector rules to schedule Kubeconfig Generator controller |
|
||||
| kubeconfigGenerator.podAnnotations | object | `{}` | The annotations to apply to the Kubeconfig Generator controller pods. |
|
||||
| kubeconfigGenerator.podSecurityContext | object | `{"runAsNonRoot":true}` | The securityContext to apply to the Kubeconfig Generator controller pods. |
|
||||
| kubeconfigGenerator.replicaCount | int | `2` | The number of the pod replicas for the Kubeconfig Generator controller. |
|
||||
| kubeconfigGenerator.resources.limits.cpu | string | `"200m"` | |
|
||||
| kubeconfigGenerator.resources.limits.memory | string | `"512Mi"` | |
|
||||
| kubeconfigGenerator.resources.requests.cpu | string | `"200m"` | |
|
||||
| kubeconfigGenerator.resources.requests.memory | string | `"512Mi"` | |
|
||||
| kubeconfigGenerator.securityContext | object | `{"allowPrivilegeEscalation":false}` | The securityContext to apply to the Kubeconfig Generator controller container only. |
|
||||
| kubeconfigGenerator.serviceAccountOverride | string | `""` | The name of the service account to use. If not set, the root Kamaji one will be used. |
|
||||
| kubeconfigGenerator.tolerations | list | `[]` | Kubernetes node taints that the Kubeconfig Generator controller pods would tolerate |
|
||||
| livenessProbe | object | `{"httpGet":{"path":"/healthz","port":"healthcheck"},"initialDelaySeconds":15,"periodSeconds":20}` | The livenessProbe for the controller container |
|
||||
| loggingDevel.enable | bool | `false` | (string) Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default false) |
|
||||
| metricsBindAddress | string | `":8080"` | (string) The address the metric endpoint binds to. (default ":8080") |
|
||||
| loggingDevel.enable | bool | `false` | Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default false) |
|
||||
| metricsBindAddress | string | `":8080"` | The address the metric endpoint binds to. (default ":8080") |
|
||||
| nameOverride | string | `""` | |
|
||||
| nodeSelector | object | `{}` | Kubernetes node selector rules to schedule Kamaji controller |
|
||||
| podAnnotations | object | `{}` | The annotations to apply to the Kamaji controller pods. |
|
||||
@@ -129,6 +119,7 @@ Here the values you can override:
|
||||
| serviceAccount.create | bool | `true` | |
|
||||
| serviceAccount.name | string | `"kamaji-controller-manager"` | |
|
||||
| serviceMonitor.enabled | bool | `false` | Set to true to enable the ServiceMonitor if you have the Prometheus Operator installed and configured |
|
||||
| telemetry | object | `{"disabled":false}` | Disable the analytics traces collection |
|
||||
| temporaryDirectoryPath | string | `"/tmp/kamaji"` | Directory which will be used to work with temporary files. (default "/tmp/kamaji") |
|
||||
| tolerations | list | `[]` | Kubernetes node taints that the Kamaji controller pods would tolerate |
|
||||
|
||||
|
||||
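Tying the `kubeconfigGenerator.*` keys above together, a minimal values override enabling the optional Deployment could look like the sketch below; the figures simply restate the documented defaults.

    kubeconfigGenerator:
      enabled: true                            # the Deployment is not rendered by default
      replicaCount: 2                          # documented default, restated for clarity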
@@ -18,10 +18,15 @@ This Helm Chart starting from v0.1.1 provides the installation of an internal `e
|
||||
|
||||
## Install Kamaji
|
||||
|
||||
To add clastix helm repository:
|
||||
|
||||
|
||||
helm repo add clastix https://clastix.github.io/charts
|
||||
|
||||
To install the Chart with the release name `kamaji`:
|
||||
|
||||
|
||||
helm upgrade --install --namespace kamaji-system --create-namespace clastix/kamaji
|
||||
helm upgrade --install --namespace kamaji-system --create-namespace kamaji clastix/kamaji
|
||||
|
||||
Show the status:
|
||||
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
# Kamaji - Managed Kubernetes Service
|
||||
|
||||
Kamaji is a tool aimed to build and operate a Managed Kubernetes Service with a fraction of the operational burden.
|
||||
|
||||
Useful links:
|
||||
- [Kamaji Github repository](https://github.com/clastix/kamaji)
|
||||
- [Kamaji Documentation](https://github.com/clastix/kamaji/docs/)
|
||||
|
||||
## Requirements
|
||||
|
||||
* Kubernetes v1.22+
|
||||
* Helm v3
|
||||
|
||||
# Installation
|
||||
|
||||
To install the Chart with the release name `kamaji`:
|
||||
|
||||
helm upgrade --install --namespace kamaji-system --create-namespace clastix/kamaji
|
||||
|
||||
Show the status:
|
||||
|
||||
helm status kamaji -n kamaji-system
|
||||
|
||||
Upgrade the Chart
|
||||
|
||||
helm upgrade kamaji -n kamaji-system clastix/kamaji
|
||||
|
||||
Uninstall the Chart
|
||||
|
||||
helm uninstall kamaji -n kamaji-system
|
||||
119
charts/kamaji/controller-gen/clusterrole.yaml
Normal file
@@ -0,0 +1,119 @@
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
- secrets
|
||||
- services
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- batch
|
||||
resources:
|
||||
- jobs
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- gateway.networking.k8s.io
|
||||
resources:
|
||||
- gateways
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- gateway.networking.k8s.io
|
||||
resources:
|
||||
- grpcroutes
|
||||
- httproutes
|
||||
- tlsroutes
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
resources:
|
||||
- datastores
|
||||
- tenantcontrolplanes
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
resources:
|
||||
- datastores/status
|
||||
- kubeconfiggenerators/status
|
||||
- tenantcontrolplanes/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
resources:
|
||||
- kubeconfiggenerators
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
resources:
|
||||
- kubeconfiggenerators/finalizers
|
||||
- tenantcontrolplanes/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
11
charts/kamaji/controller-gen/crd-conversion.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
spec:
|
||||
conversion:
|
||||
strategy: Webhook
|
||||
webhook:
|
||||
clientConfig:
|
||||
service:
|
||||
name: kamaji-webhook-service
|
||||
namespace: kamaji-system
|
||||
path: /convert
|
||||
conversionReviewVersions:
|
||||
- v1
|
||||
20
charts/kamaji/controller-gen/mutating-webhook.yaml
Normal file
@@ -0,0 +1,20 @@
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: '{{ include "kamaji.webhookServiceName" . }}'
|
||||
namespace: '{{ .Release.Namespace }}'
|
||||
path: /mutate-kamaji-clastix-io-v1alpha1-tenantcontrolplane
|
||||
failurePolicy: Fail
|
||||
name: mtenantcontrolplane.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- tenantcontrolplanes
|
||||
sideEffects: None
|
||||
81
charts/kamaji/controller-gen/validating-webhook.yaml
Normal file
@@ -0,0 +1,81 @@
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: '{{ include "kamaji.webhookServiceName" . }}'
|
||||
namespace: '{{ .Release.Namespace }}'
|
||||
path: /telemetry
|
||||
failurePolicy: Ignore
|
||||
name: telemetry.kamaji.clastix.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
- DELETE
|
||||
resources:
|
||||
- tenantcontrolplanes
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: '{{ include "kamaji.webhookServiceName" . }}'
|
||||
namespace: '{{ .Release.Namespace }}'
|
||||
path: /validate-kamaji-clastix-io-v1alpha1-datastore
|
||||
failurePolicy: Fail
|
||||
name: vdatastore.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
- DELETE
|
||||
resources:
|
||||
- datastores
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: '{{ include "kamaji.webhookServiceName" . }}'
|
||||
namespace: '{{ .Release.Namespace }}'
|
||||
path: /validate--v1-secret
|
||||
failurePolicy: Ignore
|
||||
name: vdatastoresecrets.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- DELETE
|
||||
resources:
|
||||
- secrets
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: '{{ include "kamaji.webhookServiceName" . }}'
|
||||
namespace: '{{ .Release.Namespace }}'
|
||||
path: /validate-kamaji-clastix-io-v1alpha1-tenantcontrolplane
|
||||
failurePolicy: Fail
|
||||
name: vtenantcontrolplane.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- tenantcontrolplanes
|
||||
sideEffects: None
|
||||
@@ -4,7 +4,7 @@ kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
cert-manager.io/inject-ca-from: kamaji-system/kamaji-serving-cert
|
||||
controller-gen.kubebuilder.io/version: v0.9.2
|
||||
controller-gen.kubebuilder.io/version: v0.20.0
|
||||
name: datastores.kamaji.clastix.io
|
||||
spec:
|
||||
group: kamaji.clastix.io
|
||||
@@ -30,10 +30,19 @@ spec:
|
||||
description: DataStore is the Schema for the datastores API.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
@@ -41,18 +50,24 @@ spec:
|
||||
description: DataStoreSpec defines the desired state of DataStore.
|
||||
properties:
|
||||
basicAuth:
|
||||
description: In case of authentication enabled for the given data store, specifies the username and password pair. This value is optional.
|
||||
description: |-
|
||||
In case of authentication enabled for the given data store, specifies the username and password pair.
|
||||
This value is optional.
|
||||
properties:
|
||||
password:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -69,13 +84,17 @@ spec:
|
||||
username:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -99,29 +118,43 @@ spec:
|
||||
- etcd
|
||||
- MySQL
|
||||
- PostgreSQL
|
||||
- NATS
|
||||
type: string
|
||||
x-kubernetes-validations:
|
||||
- message: Datastore driver is immutable
|
||||
rule: self == oldSelf
|
||||
endpoints:
|
||||
description: List of the endpoints to connect to the shared datastore. No need for protocol, just bare IP/FQDN and port.
|
||||
description: |-
|
||||
List of the endpoints to connect to the shared datastore.
|
||||
No need for protocol, just bare IP/FQDN and port.
|
||||
items:
|
||||
type: string
|
||||
minItems: 1
|
||||
type: array
|
||||
tlsConfig:
|
||||
description: Defines the TLS/SSL configuration required to connect to the data store in a secure way.
|
||||
description: |-
|
||||
Defines the TLS/SSL configuration required to connect to the data store in a secure way.
|
||||
This value is optional.
|
||||
properties:
|
||||
certificateAuthority:
|
||||
description: Retrieve the Certificate Authority certificate and private key, such as bare content of the file, or a SecretReference. The key reference is required since etcd authentication is based on certificates, and Kamaji is responsible in creating this.
|
||||
description: |-
|
||||
Retrieve the Certificate Authority certificate and private key, such as bare content of the file, or a SecretReference.
|
||||
The key reference is required since etcd authentication is based on certificates, and Kamaji is responsible for creating this.
|
||||
properties:
|
||||
certificate:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -138,13 +171,17 @@ spec:
|
||||
privateKey:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -167,13 +204,17 @@ spec:
|
||||
certificate:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -190,13 +231,17 @@ spec:
|
||||
privateKey:
|
||||
properties:
|
||||
content:
|
||||
description: Bare content of the file, base64 encoded. It has precedence over the SecretReference value.
|
||||
description: |-
|
||||
Bare content of the file, base64 encoded.
|
||||
It has precedence over the SecretReference value.
|
||||
format: byte
|
||||
type: string
|
||||
secretReference:
|
||||
properties:
|
||||
keyPath:
|
||||
description: Name of the key for the given Secret reference where the content is stored. This value is mandatory.
|
||||
description: |-
|
||||
Name of the key for the given Secret reference where the content is stored.
|
||||
This value is mandatory.
|
||||
minLength: 1
|
||||
type: string
|
||||
name:
|
||||
@@ -216,16 +261,33 @@ spec:
|
||||
type: object
|
||||
required:
|
||||
- certificateAuthority
|
||||
- clientCertificate
|
||||
type: object
|
||||
required:
|
||||
- driver
|
||||
- endpoints
|
||||
- tlsConfig
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: certificateAuthority privateKey must have secretReference or content when driver is etcd
|
||||
rule: '(self.driver == "etcd") ? (self.tlsConfig != null && (has(self.tlsConfig.certificateAuthority.privateKey.secretReference) || has(self.tlsConfig.certificateAuthority.privateKey.content))) : true'
|
||||
- message: clientCertificate must have secretReference or content when driver is etcd
|
||||
rule: '(self.driver == "etcd") ? (self.tlsConfig != null && (has(self.tlsConfig.clientCertificate.certificate.secretReference) || has(self.tlsConfig.clientCertificate.certificate.content))) : true'
|
||||
- message: clientCertificate privateKey must have secretReference or content when driver is etcd
|
||||
rule: '(self.driver == "etcd") ? (self.tlsConfig != null && (has(self.tlsConfig.clientCertificate.privateKey.secretReference) || has(self.tlsConfig.clientCertificate.privateKey.content))) : true'
|
||||
- message: When driver is not etcd and tlsConfig exists, clientCertificate must be null or contain valid content
|
||||
rule: '(self.driver != "etcd" && has(self.tlsConfig) && has(self.tlsConfig.clientCertificate)) ? (((has(self.tlsConfig.clientCertificate.certificate.secretReference) || has(self.tlsConfig.clientCertificate.certificate.content)))) : true'
|
||||
- message: When driver is not etcd and basicAuth exists, username must have secretReference or content
|
||||
rule: '(self.driver != "etcd" && has(self.basicAuth)) ? ((has(self.basicAuth.username.secretReference) || has(self.basicAuth.username.content))) : true'
|
||||
- message: When driver is not etcd and basicAuth exists, password must have secretReference or content
|
||||
rule: '(self.driver != "etcd" && has(self.basicAuth)) ? ((has(self.basicAuth.password.secretReference) || has(self.basicAuth.password.content))) : true'
|
||||
- message: When driver is not etcd, either tlsConfig or basicAuth must be provided
|
||||
rule: '(self.driver != "etcd") ? (has(self.tlsConfig) || has(self.basicAuth)) : true'
|
||||
status:
|
||||
description: DataStoreStatus defines the observed state of DataStore.
|
||||
properties:
|
||||
observedGeneration:
|
||||
description: ObservedGeneration represents the .metadata.generation that was last reconciled.
|
||||
format: int64
|
||||
type: integer
|
||||
usedBy:
|
||||
description: List of the Tenant Control Planes, as namespaced names, using this data store.
|
||||
items:
|
||||
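Since the CEL rules above require certificate material whenever the driver is etcd, here is a hedged sketch of a matching DataStore object. The Secret names, namespace, and endpoints mirror the chart defaults that appear elsewhere in this diff and are illustrative, not prescriptive.

    apiVersion: kamaji.clastix.io/v1alpha1
    kind: DataStore
    metadata:
      name: default
    spec:
      driver: etcd
      endpoints:
        - etcd-0.etcd.kamaji-system.svc.cluster.local:2379
        - etcd-1.etcd.kamaji-system.svc.cluster.local:2379
        - etcd-2.etcd.kamaji-system.svc.cluster.local:2379
      tlsConfig:
        certificateAuthority:
          certificate:
            secretReference:
              name: etcd-certs
              namespace: kamaji-system
              keyPath: ca.crt
          privateKey:                          # required by the CEL rule for the etcd driver
            secretReference:
              name: etcd-certs
              namespace: kamaji-system
              keyPath: ca.key
        clientCertificate:
          certificate:
            secretReference:
              name: root-client-certs
              namespace: kamaji-system
              keyPath: tls.crt
          privateKey:
            secretReference:
              name: root-client-certs
              namespace: kamaji-system
              keyPath: tls.key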
226
charts/kamaji/crds/kamaji.clastix.io_kubeconfiggenerators.yaml
Normal file
@@ -0,0 +1,226 @@
|
||||
apiVersion: apiextensions.k8s.io/v1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
cert-manager.io/inject-ca-from: kamaji-system/kamaji-serving-cert
|
||||
controller-gen.kubebuilder.io/version: v0.20.0
|
||||
name: kubeconfiggenerators.kamaji.clastix.io
|
||||
spec:
|
||||
group: kamaji.clastix.io
|
||||
names:
|
||||
categories:
|
||||
- kamaji
|
||||
kind: KubeconfigGenerator
|
||||
listKind: KubeconfigGeneratorList
|
||||
plural: kubeconfiggenerators
|
||||
shortNames:
|
||||
- kc
|
||||
singular: kubeconfiggenerator
|
||||
scope: Cluster
|
||||
versions:
|
||||
- additionalPrinterColumns:
|
||||
- description: Age
|
||||
jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
name: v1alpha1
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: KubeconfigGenerator is the Schema for the kubeconfiggenerators API.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
properties:
|
||||
controlPlaneEndpointFrom:
|
||||
default: admin.svc
|
||||
description: |-
|
||||
ControlPlaneEndpointFrom is the key used to extract the Tenant Control Plane endpoint that must be used by the generator.
|
||||
The targeted Secret is the `${TCP}-admin-kubeconfig` one, defaulting to `admin.svc`.
|
||||
type: string
|
||||
groups:
|
||||
description: |-
|
||||
Groups resolves to a set of strings used to assign the x509 organisations field.
|
||||
It will be recognised by Kubernetes as user groups.
|
||||
items:
|
||||
description: |-
|
||||
CompoundValue allows defining a static, or a dynamic value.
|
||||
Options are mutually exclusive; only one should be set.
|
||||
properties:
|
||||
fromDefinition:
|
||||
description: |-
|
||||
FromDefinition is used to generate a dynamic value,
|
||||
it uses the dot notation to access fields from the referenced TenantControlPlane object:
|
||||
e.g.: metadata.name
|
||||
type: string
|
||||
stringValue:
|
||||
description: StringValue is a static string value.
|
||||
type: string
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: Either stringValue or fromDefinition must be set, but not both.
|
||||
rule: (has(self.stringValue) || has(self.fromDefinition)) && !(has(self.stringValue) && has(self.fromDefinition))
|
||||
type: array
|
||||
namespaceSelector:
|
||||
description: NamespaceSelector is used to filter Namespaces from which the generator should extract TenantControlPlane objects.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
|
||||
items:
|
||||
description: |-
|
||||
A label selector requirement is a selector that contains values, a key, and an operator that
|
||||
relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the selector applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: |-
|
||||
operator represents a key's relationship to a set of values.
|
||||
Valid operators are In, NotIn, Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
values is an array of string values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array is replaced during a strategic
|
||||
merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions, whose key field is "key", the
|
||||
operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
tenantControlPlaneSelector:
|
||||
description: TenantControlPlaneSelector is used to filter the TenantControlPlane objects that should be addressed by the generator.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
|
||||
items:
|
||||
description: |-
|
||||
A label selector requirement is a selector that contains values, a key, and an operator that
|
||||
relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the selector applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: |-
|
||||
operator represents a key's relationship to a set of values.
|
||||
Valid operators are In, NotIn, Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: |-
|
||||
values is an array of string values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array is replaced during a strategic
|
||||
merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
x-kubernetes-list-type: atomic
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: |-
|
||||
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions, whose key field is "key", the
|
||||
operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
x-kubernetes-map-type: atomic
|
||||
user:
|
||||
description: User resolves to a string to identify the client, assigned to the x509 Common Name field.
|
||||
properties:
|
||||
fromDefinition:
|
||||
description: |-
|
||||
FromDefinition is used to generate a dynamic value,
|
||||
it uses the dot notation to access fields from the referenced TenantControlPlane object:
|
||||
e.g.: metadata.name
|
||||
type: string
|
||||
stringValue:
|
||||
description: StringValue is a static string value.
|
||||
type: string
|
||||
type: object
|
||||
x-kubernetes-validations:
|
||||
- message: Either stringValue or fromDefinition must be set, but not both.
|
||||
rule: (has(self.stringValue) || has(self.fromDefinition)) && !(has(self.stringValue) && has(self.fromDefinition))
|
||||
required:
|
||||
- user
|
||||
type: object
|
||||
status:
|
||||
description: KubeconfigGeneratorStatus defines the observed state of KubeconfigGenerator.
|
||||
properties:
|
||||
availableResources:
|
||||
default: 0
|
||||
description: |-
|
||||
AvailableResources is the sum of successfully generated resources.
|
||||
If this value differs from Resources, check the errors field.
|
||||
type: integer
|
||||
errors:
|
||||
description: Errors is the list of failed kubeconfig generations.
|
||||
items:
|
||||
properties:
|
||||
message:
|
||||
description: Message is the error message recorded upon the last generator run.
|
||||
type: string
|
||||
resource:
|
||||
description: Resource is the Namespaced name of the errored resource.
|
||||
type: string
|
||||
required:
|
||||
- message
|
||||
- resource
|
||||
type: object
|
||||
type: array
|
||||
observedGeneration:
|
||||
description: ObservedGeneration represents the .metadata.generation that was last reconciled.
|
||||
format: int64
|
||||
type: integer
|
||||
resources:
|
||||
default: 0
|
||||
description: Resources is the sum of targeted TenantControlPlane objects.
|
||||
type: integer
|
||||
required:
|
||||
- availableResources
|
||||
- resources
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
8859
charts/kamaji/crds/kamaji.clastix.io_tenantcontrolplanes.yaml
Normal file
@@ -89,3 +89,15 @@ Create the name of the cert-manager Certificate
|
||||
{{- define "kamaji.certificateName" -}}
|
||||
{{- printf "%s-serving-cert" (include "kamaji.fullname" .) }}
|
||||
{{- end }}
|
||||
|
||||
|
||||
{{/*
|
||||
Kubeconfig Generator Deployment name.
|
||||
*/}}
|
||||
{{- define "kamaji.kubeconfigGeneratorName" -}}
|
||||
{{- if .Values.kubeconfigGenerator.fullnameOverride }}
|
||||
{{- .Values.kubeconfigGenerator.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name "kubeconfig-generator" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
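As a quick sanity check of the helper above: with no `kubeconfigGenerator.fullnameOverride` set and a release named `kamaji` (the release name used in the README), `kamaji.kubeconfigGeneratorName` would render the Deployment name as:

    kamaji-kubeconfig-generator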
@@ -1,90 +0,0 @@
|
||||
{{/*
|
||||
Create a default fully qualified datastore name.
|
||||
*/}}
|
||||
{{- define "datastore.fullname" -}}
|
||||
{{- default "default" .Values.datastore.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "datastore.labels" -}}
|
||||
kamaji.clastix.io/datastore: {{ .Values.datastore.driver }}
|
||||
helm.sh/chart: {{ include "kamaji.chart" . }}
|
||||
{{ include "kamaji.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Datastore endpoints, in case of ETCD, retrieving the one provided by the chart.
|
||||
*/}}
|
||||
{{- define "datastore.endpoints" -}}
|
||||
{{- if eq .Values.datastore.driver "etcd" }}
|
||||
{{ include "etcd.endpoints" . }}
|
||||
{{- else }}
|
||||
{{ .Values.datastore.endpoints }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
The Certificate Authority section for the DataSource object.
|
||||
*/}}
|
||||
{{- define "datastore.certificateAuthority" -}}
|
||||
{{- if eq .Values.datastore.driver "etcd" }}
|
||||
certificate:
|
||||
secretReference:
|
||||
name: {{ include "etcd.caSecretName" . }}
|
||||
namespace: {{ include "etcd.caSecretNamespace" . }}
|
||||
keyPath: ca.crt
|
||||
privateKey:
|
||||
secretReference:
|
||||
name: {{ include "etcd.caSecretName" . }}
|
||||
namespace: {{ include "etcd.caSecretNamespace" . }}
|
||||
keyPath: ca.key
|
||||
{{- else }}
|
||||
certificate:
|
||||
secretReference:
|
||||
name: {{ .Values.datastore.tlsConfig.certificateAuthority.certificate.name }}
|
||||
namespace: {{ .Values.datastore.tlsConfig.certificateAuthority.certificate.namespace }}
|
||||
keyPath: {{ .Values.datastore.tlsConfig.certificateAuthority.certificate.keyPath }}
|
||||
{{- if .Values.datastore.tlsConfig.certificateAuthority.privateKey.name }}
|
||||
privateKey:
|
||||
secretReference:
|
||||
name: {{ .Values.datastore.tlsConfig.certificateAuthority.privateKey.name }}
|
||||
namespace: {{ .Values.datastore.tlsConfig.certificateAuthority.privateKey.namespace }}
|
||||
keyPath: {{ .Values.datastore.tlsConfig.certificateAuthority.privateKey.keyPath }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
The Client Certificate section for the DataSource object.
|
||||
*/}}
|
||||
{{- define "datastore.clientCertificate" -}}
|
||||
{{- if eq .Values.datastore.driver "etcd" }}
|
||||
certificate:
|
||||
secretReference:
|
||||
name: {{ include "etcd.clientSecretName" . }}
|
||||
namespace: {{ include "etcd.clientSecretNamespace" . }}
|
||||
keyPath: tls.crt
|
||||
privateKey:
|
||||
secretReference:
|
||||
name: {{ include "etcd.clientSecretName" . }}
|
||||
namespace: {{ include "etcd.clientSecretNamespace" . }}
|
||||
keyPath: tls.key
|
||||
{{- else }}
|
||||
certificate:
|
||||
secretReference:
|
||||
name: {{ .Values.datastore.tlsConfig.clientCertificate.certificate.name }}
|
||||
namespace: {{ .Values.datastore.tlsConfig.clientCertificate.certificate.namespace }}
|
||||
keyPath: {{ .Values.datastore.tlsConfig.clientCertificate.certificate.keyPath }}
|
||||
privateKey:
|
||||
secretReference:
|
||||
name: {{ .Values.datastore.tlsConfig.clientCertificate.privateKey.name }}
|
||||
namespace: {{ .Values.datastore.tlsConfig.clientCertificate.privateKey.namespace }}
|
||||
keyPath: {{ .Values.datastore.tlsConfig.clientCertificate.privateKey.keyPath }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -1,142 +0,0 @@
{{/*
Create a default fully qualified etcd name.
*/}}
{{- define "etcd.fullname" -}}
{{- printf "etcd" }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "etcd.serviceAccountName" -}}
{{- if .Values.etcd.serviceAccount.create }}
{{- default (include "etcd.fullname" .) .Values.etcd.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.etcd.serviceAccount.name }}
{{- end }}
{{- end }}

{{/*
Create the name of the Service to use
*/}}
{{- define "etcd.serviceName" -}}
{{- printf "%s" (include "etcd.fullname" .) | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "etcd.labels" -}}
app.kubernetes.io/name: {{ include "kamaji.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/components: etcd
{{- end }}

{{/*
Selector labels.
*/}}
{{- define "etcd.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kamaji.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: etcd
{{- end }}

{{/*
Name of the etcd CA secret.
*/}}
{{- define "etcd.caSecretName" }}
{{- if .Values.etcd.deploy }}
{{- printf "%s-%s" (include "etcd.fullname" .) "certs" | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- required "A valid .Values.etcd.overrides.caSecret.name required!" .Values.etcd.overrides.caSecret.name }}
{{- end }}
{{- end }}

{{/*
Namespace of the etcd CA secret.
*/}}
{{- define "etcd.caSecretNamespace" }}
{{- if .Values.etcd.deploy }}
{{- .Release.Namespace }}
{{- else }}
{{- required "A valid .Values.etcd.overrides.caSecret.namespace required!" .Values.etcd.overrides.caSecret.namespace }}
{{- end }}
{{- end }}

{{/*
Name of the certificate signing requests for the certificates required by etcd.
*/}}
{{- define "etcd.csrConfigMapName" }}
{{- printf "%s-csr" (include "etcd.fullname" .) }}
{{- end }}

{{/*
Name of the etcd root-client secret.
*/}}
{{- define "etcd.clientSecretName" }}
{{- if .Values.etcd.deploy }}
{{- printf "root-client-certs" }}
{{- else }}
{{- required "A valid .Values.etcd.overrides.clientSecret.name required!" .Values.etcd.overrides.clientSecret.name }}
{{- end }}
{{- end }}

{{/*
Namespace of the etcd root-client secret.
*/}}
{{- define "etcd.clientSecretNamespace" }}
{{- if .Values.etcd.deploy }}
{{- .Release.Namespace }}
{{- else }}
{{- required "A valid .Values.etcd.overrides.clientSecret.namespace required!" .Values.etcd.overrides.clientSecret.namespace }}
{{- end }}
{{- end }}

{{/*
Comma separated list of etcd endpoints, using the overrides in case of unmanaged etcd.
*/}}
{{- define "etcd.endpoints" }}
{{- $list := list -}}
{{- if .Values.etcd.deploy }}
{{- range $count := until 3 -}}
{{- $list = append $list (printf "%s-%d.%s.%s.svc.cluster.local:%d" "etcd" $count ( include "etcd.serviceName" . ) $.Release.Namespace (int $.Values.etcd.port) ) -}}
{{- end }}
{{- else if .Values.etcd.overrides.endpoints }}
{{- range $v := .Values.etcd.overrides.endpoints -}}
{{- $list = append $list (printf "%s:%d" $v (int $.Values.etcd.port) ) -}}
{{- end -}}
{{- else if not .Values.etcd.overrides.endpoints }}
{{- fail "A valid .Values.etcd.overrides.endpoints required!" }}
{{- end }}
{{- $list | toYaml }}
{{- end }}

{{/*
Key-value of the etcd peers, using the overrides in case of unmanaged etcd.
*/}}
{{- define "etcd.initialCluster" }}
{{- $list := list -}}
{{- if .Values.etcd.deploy }}
{{- range $i, $count := until 3 -}}
{{- $list = append $list ( printf "etcd-%d=https://%s-%d.%s.%s.svc.cluster.local:%d" $i "etcd" $count ( include "etcd.serviceName" . ) $.Release.Namespace (int $.Values.etcd.peerApiPort) ) -}}
{{- end }}
{{- else if .Values.etcd.overrides.endpoints }}
{{- range $k, $v := .Values.etcd.overrides.endpoints -}}
{{- $list = append $list ( printf "%s=%s:%d" $k $v (int $.Values.etcd.peerApiPort) ) -}}
{{- end -}}
{{- else if not .Values.etcd.overrides.endpoints }}
{{- fail "A valid .Values.etcd.overrides.endpoints required!" }}
{{- end }}
{{- join "," $list -}}
{{- end }}

{{/*
Retrieve the current Kubernetes version to launch a kubectl container with the minimum version skew possible.
*/}}
{{- define "etcd.jobsTagKubeVersion" -}}
{{- if contains "-eks-" .Capabilities.KubeVersion.GitVersion }}
{{- print "v" .Capabilities.KubeVersion.Major "." (.Capabilities.KubeVersion.Minor | replace "+" "") -}}
{{- else }}
{{- print "v" .Capabilities.KubeVersion.Major "." .Capabilities.KubeVersion.Minor -}}
{{- end }}
{{- end }}
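As a rough illustration of what "etcd.endpoints" and "etcd.initialCluster" evaluate to with the managed etcd (3 replicas, client port 2379, peer port 2380), assuming the release namespace is kamaji-system:

    # etcd.endpoints (YAML list)
    - etcd-0.etcd.kamaji-system.svc.cluster.local:2379
    - etcd-1.etcd.kamaji-system.svc.cluster.local:2379
    - etcd-2.etcd.kamaji-system.svc.cluster.local:2379
    # etcd.initialCluster (single comma-separated string)
    # etcd-0=https://etcd-0.etcd.kamaji-system.svc.cluster.local:2380,etcd-1=https://etcd-1.etcd.kamaji-system.svc.cluster.local:2380,etcd-2=https://etcd-2.etcd.kamaji-system.svc.cluster.local:2380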
@@ -19,10 +19,6 @@ spec:
      labels:
        {{- include "kamaji.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      serviceAccountName: {{ include "kamaji.serviceAccountName" . }}
@@ -33,15 +29,18 @@ spec:
        - --leader-elect
        - --metrics-bind-address={{ .Values.metricsBindAddress }}
        - --tmp-directory={{ .Values.temporaryDirectoryPath }}
        - --datastore={{ include "datastore.fullname" . }}
        {{- if not (eq .Values.defaultDatastoreName "") }}
        - --datastore={{ .Values.defaultDatastoreName }}
        {{- end }}
        {{- if .Values.telemetry.disabled }}
        - --disable-telemetry
        {{- end }}
        {{- if .Values.loggingDevel.enable }}
        - --zap-devel
        {{- end }}
        {{- with .Values.extraArgs }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
        command:
        - /kamaji
        env:
        - name: POD_NAMESPACE
          valueFrom:
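Assuming the chart defaults shown later in values.yaml (metricsBindAddress ":8080", temporaryDirectoryPath "/tmp/kamaji", defaultDatastoreName "default", telemetry left enabled), the rendered controller arguments would look roughly like the following sketch:

    args:
      - --leader-elect
      - --metrics-bind-address=:8080
      - --tmp-directory=/tmp/kamaji
      - --datastore=default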
@@ -1,26 +0,0 @@
apiVersion: kamaji.clastix.io/v1alpha1
kind: DataStore
metadata:
  name: {{ include "datastore.fullname" . }}
  annotations:
    "helm.sh/hook": pre-install
  labels:
    {{- include "datastore.labels" . | nindent 4 }}
spec:
  driver: {{ .Values.datastore.driver }}
  endpoints:
  {{- include "datastore.endpoints" . | indent 4 }}
  {{- if (and .Values.datastore.basicAuth.usernameSecret.name .Values.datastore.basicAuth.passwordSecret.name) }}
  basicAuth:
    username:
      secretReference:
        {{- .Values.datastore.basicAuth.usernameSecret | toYaml | nindent 8 }}
    password:
      secretReference:
        {{- .Values.datastore.basicAuth.passwordSecret | toYaml | nindent 8 }}
  {{- end }}
  tlsConfig:
    certificateAuthority:
    {{- include "datastore.certificateAuthority" . | indent 6 }}
    clientCertificate:
    {{- include "datastore.clientCertificate" . | indent 6 }}
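With the etcd driver, this template renders a DataStore whose TLS references point at the chart-managed Secrets. A trimmed sketch of the resulting object (release namespace assumed to be kamaji-system, private key sections omitted for brevity):

    apiVersion: kamaji.clastix.io/v1alpha1
    kind: DataStore
    metadata:
      name: default
    spec:
      driver: etcd
      endpoints:
        - etcd-0.etcd.kamaji-system.svc.cluster.local:2379
        - etcd-1.etcd.kamaji-system.svc.cluster.local:2379
        - etcd-2.etcd.kamaji-system.svc.cluster.local:2379
      tlsConfig:
        certificateAuthority:
          certificate:
            secretReference:
              name: etcd-certs
              namespace: kamaji-system
              keyPath: ca.crt
        clientCertificate:
          certificate:
            secretReference:
              name: root-client-certs
              namespace: kamaji-system
              keyPath: tls.crt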
@@ -1,98 +0,0 @@
{{- if .Values.etcd.deploy }}
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    {{- include "etcd.labels" . | nindent 4 }}
  name: {{ include "etcd.csrConfigMapName" . }}
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/hook": pre-install
    "helm.sh/hook-weight": "-5"
    "helm.sh/hook-delete-policy": "hook-succeeded,hook-failed"
data:
  ca-csr.json: |-
    {
      "CN": "Clastix CA",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "IT",
          "ST": "Italy",
          "L": "Milan"
        }
      ]
    }
  config.json: |-
    {
      "signing": {
        "default": {
          "expiry": "8760h"
        },
        "profiles": {
          "server-authentication": {
            "usages": ["signing", "key encipherment", "server auth"],
            "expiry": "8760h"
          },
          "client-authentication": {
            "usages": ["signing", "key encipherment", "client auth"],
            "expiry": "8760h"
          },
          "peer-authentication": {
            "usages": ["signing", "key encipherment", "server auth", "client auth"],
            "expiry": "8760h"
          }
        }
      }
    }
  server-csr.json: |-
    {
      "CN": "etcd",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "hosts": [
        {{- range $count := until 3 -}}
        {{ printf "\"etcd-%d.%s.%s.svc.cluster.local\"," $count (include "etcd.serviceName" .) $.Release.Namespace }}
        {{- end }}
        "etcd-server.{{ .Release.Namespace }}.svc.cluster.local",
        "etcd-server.{{ .Release.Namespace }}.svc",
        "etcd-server",
        "127.0.0.1"
      ]
    }
  peer-csr.json: |-
    {
      "CN": "etcd",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "hosts": [
        {{- range $count := until 3 -}}
        {{ printf "\"etcd-%d\"," $count }}
        {{ printf "\"etcd-%d.%s\"," $count (include "etcd.serviceName" .) }}
        {{ printf "\"etcd-%d.%s.%s.svc\"," $count (include "etcd.serviceName" .) $.Release.Namespace }}
        {{ printf "\"etcd-%d.%s.%s.svc.cluster.local\"," $count (include "etcd.serviceName" .) $.Release.Namespace }}
        {{- end }}
        "127.0.0.1"
      ]
    }
  root-client-csr.json: |-
    {
      "CN": "root",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "O": "system:masters"
        }
      ]
    }
{{- end }}
@@ -1,35 +0,0 @@
{{- if .Values.etcd.deploy }}
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    {{- include "etcd.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": pre-delete
    "helm.sh/hook-weight": "-5"
    "helm.sh/hook-delete-policy": "hook-succeeded,hook-failed"
  name: "{{ .Release.Name }}-etcd-teardown"
  namespace: {{ .Release.Namespace }}
spec:
  template:
    metadata:
      name: "{{ .Release.Name }}"
    spec:
      serviceAccountName: {{ include "etcd.serviceAccountName" . }}
      restartPolicy: Never
      containers:
        - name: kubectl
          image: {{ printf "clastix/kubectl:%s" (include "etcd.jobsTagKubeVersion" .) }}
          command:
            - kubectl
            - --namespace={{ .Release.Namespace }}
            - delete
            - secret
            - --ignore-not-found=true
            - {{ include "etcd.caSecretName" . }}
            - {{ include "etcd.clientSecretName" . }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
{{- end }}
@@ -1,70 +0,0 @@
{{- if .Values.etcd.deploy }}
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    {{- include "etcd.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": post-install
    "helm.sh/hook-weight": "-5"
    "helm.sh/hook-delete-policy": "hook-succeeded,hook-failed"
  name: "{{ .Release.Name }}-etcd-setup"
  namespace: {{ .Release.Namespace }}
spec:
  template:
    metadata:
      name: "{{ .Release.Name }}"
    spec:
      serviceAccountName: {{ include "etcd.serviceAccountName" . }}
      restartPolicy: Never
      initContainers:
        - name: kubectl
          image: {{ printf "clastix/kubectl:%s" (include "etcd.jobsTagKubeVersion" .) }}
          command:
            - sh
            - -c
            - |-
              kubectl --namespace={{ .Release.Namespace }} rollout status sts/etcd --timeout=300s
      containers:
        - command:
            - bash
            - -c
            - |-
              etcdctl member list -w table &&
              etcdctl user add --no-password=true root &&
              etcdctl role add root &&
              etcdctl user grant-role root root &&
              etcdctl auth enable
          env:
            - name: ETCDCTL_ENDPOINTS
              value: https://etcd-0.{{ include "etcd.serviceName" . }}.{{ .Release.Namespace }}.svc.cluster.local:2379
            - name: ETCDCTL_CACERT
              value: /opt/certs/ca/ca.crt
            - name: ETCDCTL_CERT
              value: /opt/certs/root-certs/tls.crt
            - name: ETCDCTL_KEY
              value: /opt/certs/root-certs/tls.key
          image: quay.io/coreos/etcd:v3.5.1
          imagePullPolicy: Always
          name: etcd-client
          volumeMounts:
            - name: root-certs
              mountPath: /opt/certs/root-certs
            - name: certs
              mountPath: /opt/certs/ca
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        fsGroup: 1000
      volumes:
        - name: root-certs
          secret:
            secretName: {{ include "etcd.clientSecretName" . }}
        - name: certs
          secret:
            secretName: {{ include "etcd.caSecretName" . }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
{{- end }}
@@ -1,64 +0,0 @@
{{- if .Values.etcd.deploy }}
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    {{- include "etcd.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": pre-install
    "helm.sh/hook-weight": "-5"
    "helm.sh/hook-delete-policy": "hook-succeeded"
  name: "{{ .Release.Name }}-etcd-certs"
  namespace: {{ .Release.Namespace }}
spec:
  template:
    metadata:
      name: "{{ .Release.Name }}"
    spec:
      serviceAccountName: {{ include "etcd.serviceAccountName" . }}
      restartPolicy: Never
      initContainers:
        - name: cfssl
          image: cfssl/cfssl:latest
          command:
            - bash
            - -c
            - |-
              cfssl gencert -initca /csr/ca-csr.json | cfssljson -bare /certs/ca &&
              mv /certs/ca.pem /certs/ca.crt && mv /certs/ca-key.pem /certs/ca.key &&
              cfssl gencert -ca=/certs/ca.crt -ca-key=/certs/ca.key -config=/csr/config.json -profile=peer-authentication /csr/peer-csr.json | cfssljson -bare /certs/peer &&
              cfssl gencert -ca=/certs/ca.crt -ca-key=/certs/ca.key -config=/csr/config.json -profile=peer-authentication /csr/server-csr.json | cfssljson -bare /certs/server &&
              cfssl gencert -ca=/certs/ca.crt -ca-key=/certs/ca.key -config=/csr/config.json -profile=client-authentication /csr/root-client-csr.json | cfssljson -bare /certs/root-client
          volumeMounts:
            - mountPath: /certs
              name: certs
            - mountPath: /csr
              name: csr
      containers:
        - name: kubectl
          image: {{ printf "clastix/kubectl:%s" (include "etcd.jobsTagKubeVersion" .) }}
          command:
            - sh
            - -c
            - |-
              kubectl --namespace={{ .Release.Namespace }} delete secret --ignore-not-found=true {{ include "etcd.caSecretName" . }} {{ include "etcd.clientSecretName" . }} &&
              kubectl --namespace={{ .Release.Namespace }} create secret generic {{ include "etcd.caSecretName" . }} --from-file=/certs/ca.crt --from-file=/certs/ca.key --from-file=/certs/peer-key.pem --from-file=/certs/peer.pem --from-file=/certs/server-key.pem --from-file=/certs/server.pem &&
              kubectl --namespace={{ .Release.Namespace }} create secret tls {{ include "etcd.clientSecretName" . }} --key=/certs/root-client-key.pem --cert=/certs/root-client.pem
          volumeMounts:
            - mountPath: /certs
              name: certs
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000
        fsGroup: 1000
      volumes:
        - name: csr
          configMap:
            name: {{ include "etcd.csrConfigMapName" . }}
        - name: certs
          emptyDir: {}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
{{- end }}
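Net effect of the pre-install job above, sketched as the Secrets it leaves behind in the release namespace (names come from the helpers, keys from the kubectl commands):

    # etcd-certs (generic Secret): ca.crt, ca.key, server.pem, server-key.pem, peer.pem, peer-key.pem
    # root-client-certs (kubernetes.io/tls Secret): tls.crt, tls.key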
@@ -1,55 +0,0 @@
{{- if .Values.etcd.deploy }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    {{- include "etcd.labels" . | nindent 4 }}
  name: etcd-gen-certs-role
  annotations:
    "helm.sh/hook": pre-install
    "helm.sh/hook-weight": "-5"
  namespace: {{ .Release.Namespace }}
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - delete
    resourceNames:
      - {{ include "etcd.caSecretName" . }}
      - {{ include "etcd.clientSecretName" . }}
  - apiGroups:
      - ""
    resources:
      - secrets
    verbs:
      - create
  - apiGroups:
      - apps
    resources:
      - statefulsets
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    {{- include "etcd.labels" . | nindent 4 }}
  name: etcd-gen-certs-rolebiding
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/hook": pre-install
    "helm.sh/hook-weight": "-5"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: etcd-gen-certs-role
subjects:
  - kind: ServiceAccount
    name: {{ include "etcd.serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
{{- end }}
@@ -1,12 +0,0 @@
{{- if .Values.etcd.deploy }}
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    {{- include "etcd.labels" . | nindent 4 }}
  name: {{ include "etcd.serviceAccountName" . }}
  annotations:
    "helm.sh/hook": pre-install
    "helm.sh/hook-weight": "-5"
  namespace: {{ .Release.Namespace }}
{{- end }}
@@ -1,18 +0,0 @@
{{- if .Values.etcd.deploy }}
apiVersion: v1
kind: Service
metadata:
  labels:
    {{- include "etcd.labels" . | nindent 4 }}
  name: {{ include "etcd.serviceName" . }}
  namespace: {{ .Release.Namespace }}
spec:
  clusterIP: None
  ports:
    - port: {{ .Values.etcd.port }}
      name: client
    - port: {{ .Values.etcd.peerApiPort }}
      name: peer
  selector:
    {{- include "etcd.selectorLabels" . | nindent 4 }}
{{- end }}
@@ -1,97 +0,0 @@
{{- if .Values.etcd.deploy }}
apiVersion: apps/v1
kind: StatefulSet
metadata:
  labels:
    {{- include "etcd.labels" . | nindent 4 }}
  name: {{ include "etcd.fullname" . }}
  namespace: {{ .Release.Namespace }}
spec:
  serviceName: {{ include "etcd.serviceName" . }}
  selector:
    matchLabels:
      {{- include "etcd.selectorLabels" . | nindent 6 }}
  replicas: 3
  template:
    metadata:
      name: etcd
      labels:
        {{- include "etcd.selectorLabels" . | nindent 8 }}
    spec:
      volumes:
        - name: certs
          secret:
            secretName: {{ include "etcd.caSecretName" . }}
      containers:
        - name: etcd
          image: {{ .Values.etcd.image.repository }}:{{ .Values.etcd.image.tag | default "v3.5.4" }}
          imagePullPolicy: {{ .Values.etcd.image.pullPolicy }}
          ports:
            - containerPort: 2379
              name: client
            - containerPort: 2380
              name: peer
          volumeMounts:
            - name: data
              mountPath: /var/run/etcd
            - name: certs
              mountPath: /etc/etcd/pki
          command:
            - etcd
            - --data-dir=/var/run/etcd
            - --name=$(POD_NAME)
            - --initial-cluster-state=new
            - --initial-cluster={{ include "etcd.initialCluster" . }}
            - --initial-advertise-peer-urls=https://$(POD_NAME).etcd.$(POD_NAMESPACE).svc.cluster.local:2380
            - --advertise-client-urls=https://$(POD_NAME).etcd.$(POD_NAMESPACE).svc.cluster.local:2379
            - --initial-cluster-token=kamaji
            - --listen-client-urls=https://0.0.0.0:2379
            - --listen-metrics-urls=http://0.0.0.0:2381
            - --listen-peer-urls=https://0.0.0.0:2380
            - --client-cert-auth=true
            - --peer-client-cert-auth=true
            - --trusted-ca-file=/etc/etcd/pki/ca.crt
            - --cert-file=/etc/etcd/pki/server.pem
            - --key-file=/etc/etcd/pki/server-key.pem
            - --peer-trusted-ca-file=/etc/etcd/pki/ca.crt
            - --peer-cert-file=/etc/etcd/pki/peer.pem
            - --peer-key-file=/etc/etcd/pki/peer-key.pem
            - --auto-compaction-mode=periodic
            - --auto-compaction-retention=5m
            - --snapshot-count=10000
            - --quota-backend-bytes=8589934592
            - --v=8
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          {{- with .Values.etcd.livenessProbe }}
          livenessProbe:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          {{- with .Values.etcd.startupProbe }}
          startupProbe:
            {{- toYaml . | nindent 12 }}
          {{- end }}
  volumeClaimTemplates:
    - metadata:
        name: data
        {{- with .Values.etcd.persistence.customAnnotations }}
        annotations:
          {{- toYaml . | nindent 8 }}
        {{- end }}
      spec:
        storageClassName: {{ .Values.etcd.persistence.storageClassName }}
        accessModes:
        {{- range .Values.etcd.persistence.accessModes }}
          - {{ . | quote }}
        {{- end }}
        resources:
          requests:
            storage: {{ .Values.etcd.persistence.size }}
{{- end }}
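For an unmanaged etcd, all of the templates gated on .Values.etcd.deploy above are skipped and the overrides drive the helpers instead. An illustrative values snippet, mirroring the defaults documented later in values.yaml:

    etcd:
      deploy: false
      port: 2379
      peerApiPort: 2380
      overrides:
        caSecret:
          name: etcd-certs
          namespace: kamaji-system
        clientSecret:
          name: root-client-certs
          namespace: kamaji-system
        endpoints:
          etcd-0: etcd-0.etcd.kamaji-system.svc.cluster.local
          etcd-1: etcd-1.etcd.kamaji-system.svc.cluster.local
          etcd-2: etcd-2.etcd.kamaji-system.svc.cluster.local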
charts/kamaji/templates/kubeconfiggenerator-deployment.yaml (new file, 54 lines)
@@ -0,0 +1,54 @@
{{- if .Values.kubeconfigGenerator.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    {{- include "kamaji.labels" . | nindent 4 }}
  name: {{ include "kamaji.kubeconfigGeneratorName" . }}
  namespace: {{ .Release.Namespace }}
spec:
  replicas: {{ .Values.kubeconfigGenerator.replicaCount }}
  selector:
    matchLabels:
      {{- include "kamaji.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.kubeconfigGenerator.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "kamaji.selectorLabels" . | nindent 8 }}
    spec:
      securityContext:
        {{- toYaml .Values.kubeconfigGenerator.podSecurityContext | nindent 8 }}
      serviceAccountName: {{ default .Values.kubeconfigGenerator.serviceAccountOverride (include "kamaji.serviceAccountName" .) }}
      containers:
        - args:
            - kubeconfig-generator
            - --health-probe-bind-address={{ .Values.kubeconfigGenerator.healthProbeBindAddress }}
            - --leader-elect={{ .Values.kubeconfigGenerator.enableLeaderElect }}
            {{- if .Values.kubeconfigGenerator.loggingDevel.enable }}- --zap-devel{{- end }}
            {{- with .Values.kubeconfigGenerator.extraArgs }}
            {{- toYaml . | nindent 10 }}
            {{- end }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          name: controller
          resources:
            {{- toYaml .Values.kubeconfigGenerator.resources | nindent 12 }}
          securityContext:
            {{- toYaml .Values.kubeconfigGenerator.securityContext | nindent 12 }}
      {{- with .Values.kubeconfigGenerator.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.kubeconfigGenerator.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.kubeconfigGenerator.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
{{- end }}
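The generator is opt-in; a minimal values sketch to enable it, mirroring the kubeconfigGenerator block added to values.yaml further down:

    kubeconfigGenerator:
      enabled: true
      replicaCount: 2
      healthProbeBindAddress: ":8081"
      enableLeaderElect: true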
@@ -8,43 +8,4 @@ metadata:
|
||||
{{- include "kamaji.labels" $data | nindent 4 }}
|
||||
name: kamaji-mutating-webhook-configuration
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: {{ include "kamaji.webhookServiceName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /mutate-kamaji-clastix-io-v1alpha1-datastore
|
||||
failurePolicy: Fail
|
||||
name: mdatastore.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- datastores
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: {{ include "kamaji.webhookServiceName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /mutate-kamaji-clastix-io-v1alpha1-tenantcontrolplane
|
||||
failurePolicy: Fail
|
||||
name: mtenantcontrolplane.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- tenantcontrolplanes
|
||||
sideEffects: None
|
||||
{{ tpl (.Files.Get "controller-gen/mutating-webhook.yaml") . }}
|
||||
|
||||
@@ -9,6 +9,10 @@ metadata:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 2 }}
|
||||
{{- end }}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
@@ -54,122 +58,7 @@ metadata:
|
||||
creationTimestamp: null
|
||||
name: kamaji-manager-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- apps
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- batch
|
||||
resources:
|
||||
- jobs
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
resources:
|
||||
- datastores
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
resources:
|
||||
- datastores/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
resources:
|
||||
- tenantcontrolplanes
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
resources:
|
||||
- tenantcontrolplanes/finalizers
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
resources:
|
||||
- tenantcontrolplanes/status
|
||||
verbs:
|
||||
- get
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ingresses
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
{{ tpl (.Files.Get "controller-gen/clusterrole.yaml") . }}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
|
||||
@@ -8,63 +8,4 @@ metadata:
|
||||
{{- include "kamaji.labels" $data | nindent 4 }}
|
||||
name: kamaji-validating-webhook-configuration
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: {{ include "kamaji.webhookServiceName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /validate--v1-secret
|
||||
failurePolicy: Ignore
|
||||
name: vdatastoresecrets.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- DELETE
|
||||
resources:
|
||||
- secrets
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: {{ include "kamaji.webhookServiceName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /validate-kamaji-clastix-io-v1alpha1-datastore
|
||||
failurePolicy: Fail
|
||||
name: vdatastore.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
- DELETE
|
||||
resources:
|
||||
- datastores
|
||||
sideEffects: None
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
clientConfig:
|
||||
service:
|
||||
name: {{ include "kamaji.webhookServiceName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /validate-kamaji-clastix-io-v1alpha1-tenantcontrolplane
|
||||
failurePolicy: Fail
|
||||
name: vtenantcontrolplane.kb.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- kamaji.clastix.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- tenantcontrolplanes
|
||||
sideEffects: None
|
||||
{{ tpl (.Files.Get "controller-gen/validating-webhook.yaml") . }}
|
||||
|
||||
@@ -15,71 +15,10 @@ image:
|
||||
# -- A list of extra arguments to add to the kamaji controller default ones
|
||||
extraArgs: []
|
||||
|
||||
|
||||
serviceMonitor:
|
||||
# -- Toggle the ServiceMonitor true if you have Prometheus Operator installed and configured
|
||||
enabled: false
|
||||
|
||||
etcd:
|
||||
# -- Install an etcd with enabled multi-tenancy along with Kamaji
|
||||
deploy: true
|
||||
|
||||
# -- The peer API port which servers are listening to.
|
||||
peerApiPort: 2380
|
||||
|
||||
# -- The client request port.
|
||||
port: 2379
|
||||
|
||||
# -- Install specific etcd image
|
||||
image:
|
||||
repository: quay.io/coreos/etcd
|
||||
tag: "v3.5.6"
|
||||
pullPolicy: IfNotPresent
|
||||
|
||||
# -- The livenessProbe for the etcd container
|
||||
livenessProbe:
|
||||
failureThreshold: 8
|
||||
httpGet:
|
||||
path: /health?serializable=true
|
||||
port: 2381
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 15
|
||||
|
||||
serviceAccount:
|
||||
# -- Create a ServiceAccount, required to install and provision the etcd backing storage (default: true)
|
||||
create: true
|
||||
# -- Define the ServiceAccount name to use during the setup and provision of the etcd backing storage (default: "")
|
||||
name: ""
|
||||
persistence:
|
||||
size: 10Gi
|
||||
storageClass: ""
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
# -- The custom annotations to add to the PVC
|
||||
customAnnotations: {}
|
||||
# volumeType: local
|
||||
|
||||
overrides:
|
||||
caSecret:
|
||||
# -- Name of the secret which contains CA's certificate and private key. (default: "etcd-certs")
|
||||
name: etcd-certs
|
||||
# -- Namespace of the secret which contains CA's certificate and private key. (default: "kamaji-system")
|
||||
namespace: kamaji-system
|
||||
clientSecret:
|
||||
# -- Name of the secret which contains ETCD client certificates. (default: "root-client-certs")
|
||||
name: root-client-certs
|
||||
# -- Name of the namespace where the secret which contains ETCD client certificates is. (default: "kamaji-system")
|
||||
namespace: kamaji-system
|
||||
# -- (map) Dictionary of the endpoints for the etcd cluster's members, key is the name of the etcd server. Don't define the protocol (TLS is automatically inflected), or any port, inflected from .etcd.peerApiPort value.
|
||||
endpoints:
|
||||
etcd-0: etcd-0.etcd.kamaji-system.svc.cluster.local
|
||||
etcd-1: etcd-1.etcd.kamaji-system.svc.cluster.local
|
||||
etcd-2: etcd-2.etcd.kamaji-system.svc.cluster.local
|
||||
# -- ETCD Compaction interval (e.g. "5m0s"). (default: "0" (disabled))
|
||||
compactionInterval: 0
|
||||
|
||||
# -- The address the probe endpoint binds to. (default ":8081")
|
||||
healthProbeBindAddress: ":8081"
|
||||
|
||||
@@ -99,7 +38,7 @@ readinessProbe:
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 10
|
||||
|
||||
# -- (string) The address the metric endpoint binds to. (default ":8080")
|
||||
# -- The address the metric endpoint binds to. (default ":8080")
|
||||
metricsBindAddress: ":8080"
|
||||
|
||||
imagePullSecrets: []
|
||||
@@ -153,59 +92,67 @@ affinity: {}
|
||||
temporaryDirectoryPath: "/tmp/kamaji"
|
||||
|
||||
loggingDevel:
|
||||
# -- (string) Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default false)
|
||||
# -- Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error) (default false)
|
||||
enable: false
|
||||
|
||||
datastore:
|
||||
# -- (string) The Datastore name override, if empty defaults to `default`
|
||||
nameOverride:
|
||||
# -- (string) The Kamaji Datastore driver, supported: etcd, MySQL, PostgreSQL (defaults=etcd).
|
||||
driver: etcd
|
||||
# -- (array) List of endpoints of the selected Datastore. When letting the Chart install the etcd datastore, this field is populated automatically.
|
||||
endpoints: []
|
||||
basicAuth:
|
||||
usernameSecret:
|
||||
# -- The name of the Secret containing the username used to connect to the relational database.
|
||||
name:
|
||||
# -- The namespace of the Secret containing the username used to connect to the relational database.
|
||||
namespace:
|
||||
# -- The Secret key where the data is stored.
|
||||
keyPath:
|
||||
passwordSecret:
|
||||
# -- The name of the Secret containing the password used to connect to the relational database.
|
||||
name:
|
||||
# -- The namespace of the Secret containing the password used to connect to the relational database.
|
||||
namespace:
|
||||
# -- The Secret key where the data is stored.
|
||||
keyPath:
|
||||
tlsConfig:
|
||||
certificateAuthority:
|
||||
certificate:
|
||||
# -- Name of the Secret containing the CA required to establish the mandatory SSL/TLS connection to the datastore.
|
||||
name:
|
||||
# -- Namespace of the Secret containing the CA required to establish the mandatory SSL/TLS connection to the datastore.
|
||||
namespace:
|
||||
# -- Key of the Secret which contains the content of the certificate.
|
||||
keyPath:
|
||||
privateKey:
|
||||
# -- Name of the Secret containing the CA private key required to establish the mandatory SSL/TLS connection to the datastore.
|
||||
name:
|
||||
# -- Namespace of the Secret containing the CA private key required to establish the mandatory SSL/TLS connection to the datastore.
|
||||
namespace:
|
||||
# -- Key of the Secret which contains the content of the private key.
|
||||
keyPath:
|
||||
clientCertificate:
|
||||
certificate:
|
||||
# -- Name of the Secret containing the client certificate required to establish the mandatory SSL/TLS connection to the datastore.
|
||||
name:
|
||||
# -- Namespace of the Secret containing the client certificate required to establish the mandatory SSL/TLS connection to the datastore.
|
||||
namespace:
|
||||
# -- Key of the Secret which contains the content of the certificate.
|
||||
keyPath:
|
||||
privateKey:
|
||||
# -- Name of the Secret containing the client certificate private key required to establish the mandatory SSL/TLS connection to the datastore.
|
||||
name:
|
||||
# -- Namespace of the Secret containing the client certificate private key required to establish the mandatory SSL/TLS connection to the datastore.
|
||||
namespace:
|
||||
# -- Key of the Secret which contains the content of the private key.
|
||||
keyPath:
|
||||
# -- If specified, all the Kamaji instances with an unassigned DataStore will inherit this default value.
|
||||
defaultDatastoreName: default
|
||||
|
||||
# -- Subchart: See https://github.com/clastix/kamaji-etcd/blob/master/charts/kamaji-etcd/values.yaml
|
||||
kamaji-etcd:
|
||||
deploy: true
|
||||
fullnameOverride: kamaji-etcd
|
||||
## -- Important, this must match your management cluster's clusterDomain, otherwise the init jobs will fail
|
||||
clusterDomain: "cluster.local"
|
||||
datastore:
|
||||
enabled: true
|
||||
name: default
|
||||
|
||||
# -- Disable the analytics traces collection
|
||||
telemetry:
|
||||
disabled: false
|
||||
|
||||
kubeconfigGenerator:
|
||||
# -- Toggle to deploy the Kubeconfig Generator Deployment.
|
||||
enabled: false
|
||||
fullnameOverride: ""
|
||||
# -- The number of the pod replicas for the Kubeconfig Generator controller.
|
||||
replicaCount: 2
|
||||
# -- The annotations to apply to the Kubeconfig Generator controller pods.
|
||||
podAnnotations: {}
|
||||
# -- The securityContext to apply to the Kubeconfig Generator controller pods.
|
||||
podSecurityContext:
|
||||
runAsNonRoot: true
|
||||
# -- The name of the service account to use. If not set, the root Kamaji one will be used.
|
||||
serviceAccountOverride: ""
|
||||
# -- The address the probe endpoint binds to.
|
||||
healthProbeBindAddress: ":8081"
|
||||
# -- Enables the leader election.
|
||||
enableLeaderElect: true
|
||||
loggingDevel:
|
||||
# -- Development Mode defaults(encoder=consoleEncoder,logLevel=Debug,stackTraceLevel=Warn). Production Mode defaults(encoder=jsonEncoder,logLevel=Info,stackTraceLevel=Error)
|
||||
enable: false
|
||||
# -- A list of extra arguments to add to the Kubeconfig Generator controller default ones.
|
||||
extraArgs: []
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 512Mi
|
||||
requests:
|
||||
cpu: 200m
|
||||
memory: 512Mi
|
||||
# -- The securityContext to apply to the Kubeconfig Generator controller container only.
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
# runAsUser: 1000
|
||||
# -- Kubernetes node selector rules to schedule Kubeconfig Generator controller
|
||||
nodeSelector: {}
|
||||
# -- Kubernetes node taints that the Kubeconfig Generator controller pods would tolerate
|
||||
tolerations: []
|
||||
# -- Kubernetes affinity rules to apply to Kubeconfig Generator controller pods
|
||||
affinity: {}
|
||||
|
||||
167
cmd/kubeconfig-generator/cmd.go
Normal file
@@ -0,0 +1,167 @@
|
||||
// Copyright 2022 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package kubeconfiggenerator
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
goRuntime "runtime"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
|
||||
"github.com/clastix/kamaji/controllers"
|
||||
"github.com/clastix/kamaji/internal"
|
||||
)
|
||||
|
||||
func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
// CLI flags
|
||||
var (
|
||||
metricsBindAddress string
|
||||
healthProbeBindAddress string
|
||||
leaderElect bool
|
||||
controllerReconcileTimeout time.Duration
|
||||
cacheResyncPeriod time.Duration
|
||||
managerNamespace string
|
||||
certificateExpirationDeadline time.Duration
|
||||
)
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "kubeconfig-generator",
|
||||
Short: "Start the Kubeconfig Generator manager",
|
||||
SilenceErrors: false,
|
||||
SilenceUsage: true,
|
||||
PreRunE: func(*cobra.Command, []string) error {
|
||||
// Avoid polluting stdout with useless details by the underlying klog implementations
|
||||
klog.SetOutput(io.Discard)
|
||||
klog.LogToStderr(false)
|
||||
|
||||
if certificateExpirationDeadline < 24*time.Hour {
|
||||
return fmt.Errorf("certificate expiration deadline must be at least 24 hours")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
RunE: func(*cobra.Command, []string) error {
|
||||
ctx := ctrl.SetupSignalHandler()
|
||||
|
||||
setupLog := ctrl.Log.WithName("kubeconfig-generator")
|
||||
|
||||
setupLog.Info(fmt.Sprintf("Kamaji version %s %s%s", internal.GitTag, internal.GitCommit, internal.GitDirty))
|
||||
setupLog.Info(fmt.Sprintf("Build from: %s", internal.GitRepo))
|
||||
setupLog.Info(fmt.Sprintf("Build date: %s", internal.BuildTime))
|
||||
setupLog.Info(fmt.Sprintf("Go Version: %s", goRuntime.Version()))
|
||||
setupLog.Info(fmt.Sprintf("Go OS/Arch: %s/%s", goRuntime.GOOS, goRuntime.GOARCH))
|
||||
|
||||
ctrlOpts := ctrl.Options{
|
||||
Scheme: scheme,
|
||||
Metrics: metricsserver.Options{
|
||||
BindAddress: metricsBindAddress,
|
||||
},
|
||||
HealthProbeBindAddress: healthProbeBindAddress,
|
||||
LeaderElection: leaderElect,
|
||||
LeaderElectionNamespace: managerNamespace,
|
||||
LeaderElectionID: "kubeconfiggenerator.kamaji.clastix.io",
|
||||
NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) {
|
||||
opts.SyncPeriod = &cacheResyncPeriod
|
||||
|
||||
return cache.New(config, opts)
|
||||
},
|
||||
}
|
||||
|
||||
triggerChan := make(chan event.GenericEvent)
|
||||
|
||||
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrlOpts)
|
||||
if err != nil {
|
||||
setupLog.Error(err, "unable to start manager")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
setupLog.Info("setting probes")
|
||||
{
|
||||
if err = mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
|
||||
setupLog.Error(err, "unable to set up health check")
|
||||
|
||||
return err
|
||||
}
|
||||
if err = mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
|
||||
setupLog.Error(err, "unable to set up ready check")
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
certController := &controllers.CertificateLifecycle{Channel: triggerChan, Deadline: certificateExpirationDeadline}
|
||||
certController.EnqueueFn = certController.EnqueueForKubeconfigGenerator
|
||||
if err = certController.SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "CertificateLifecycle")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if err = (&controllers.KubeconfigGeneratorWatcher{
|
||||
Client: mgr.GetClient(),
|
||||
GeneratorChan: triggerChan,
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "KubeconfigGeneratorWatcher")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if err = (&controllers.KubeconfigGeneratorReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
NotValidThreshold: certificateExpirationDeadline,
|
||||
CertificateChan: triggerChan,
|
||||
}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "KubeconfigGenerator")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
setupLog.Info("starting manager")
|
||||
if err = mgr.Start(ctx); err != nil {
|
||||
setupLog.Error(err, "problem running manager")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
// Setting zap logger
|
||||
zapfs := flag.NewFlagSet("zap", flag.ExitOnError)
|
||||
opts := zap.Options{
|
||||
Development: true,
|
||||
}
|
||||
opts.BindFlags(zapfs)
|
||||
cmd.Flags().AddGoFlagSet(zapfs)
|
||||
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
|
||||
// Setting CLI flags
|
||||
cmd.Flags().StringVar(&metricsBindAddress, "metrics-bind-address", ":8090", "The address the metric endpoint binds to.")
|
||||
cmd.Flags().StringVar(&healthProbeBindAddress, "health-probe-bind-address", ":8091", "The address the probe endpoint binds to.")
|
||||
cmd.Flags().BoolVar(&leaderElect, "leader-elect", true, "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
|
||||
cmd.Flags().DurationVar(&controllerReconcileTimeout, "controller-reconcile-timeout", 30*time.Second, "The reconciliation request timeout before the controller withdraw the external resource calls, such as dealing with the Datastore, or the Tenant Control Plane API endpoint.")
|
||||
cmd.Flags().DurationVar(&cacheResyncPeriod, "cache-resync-period", 10*time.Hour, "The controller-runtime.Manager cache resync period.")
|
||||
cmd.Flags().StringVar(&managerNamespace, "pod-namespace", os.Getenv("POD_NAMESPACE"), "The Kubernetes Namespace on which the Operator is running in, required for the TenantControlPlane migration jobs.")
|
||||
cmd.Flags().DurationVar(&certificateExpirationDeadline, "certificate-expiration-deadline", 24*time.Hour, "Define the deadline upon certificate expiration to start the renewal process, cannot be less than a 24 hours.")
|
||||
|
||||
cobra.OnInitialize(func() {
|
||||
viper.AutomaticEnv()
|
||||
})
|
||||
|
||||
return cmd
|
||||
}
|
||||
@@ -4,75 +4,102 @@
|
||||
package manager
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
goRuntime "runtime"
|
||||
"time"
|
||||
|
||||
telemetryclient "github.com/clastix/kamaji-telemetry/pkg/client"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/discovery"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/klog/v2"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/healthz"
|
||||
"sigs.k8s.io/controller-runtime/pkg/log/zap"
|
||||
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
|
||||
ctrlwebhook "sigs.k8s.io/controller-runtime/pkg/webhook"
|
||||
|
||||
kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
|
||||
cmdutils "github.com/clastix/kamaji/cmd/utils"
|
||||
"github.com/clastix/kamaji/controllers"
|
||||
"github.com/clastix/kamaji/controllers/soot"
|
||||
"github.com/clastix/kamaji/internal"
|
||||
"github.com/clastix/kamaji/internal/builders/controlplane"
|
||||
datastoreutils "github.com/clastix/kamaji/internal/datastore/utils"
|
||||
"github.com/clastix/kamaji/internal/utilities"
|
||||
"github.com/clastix/kamaji/internal/webhook"
|
||||
"github.com/clastix/kamaji/internal/webhook/handlers"
|
||||
"github.com/clastix/kamaji/internal/webhook/routes"
|
||||
)
|
||||
|
||||
//nolint:maintidx
|
||||
func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
// CLI flags
|
||||
var (
|
||||
metricsBindAddress string
|
||||
healthProbeBindAddress string
|
||||
leaderElect bool
|
||||
tmpDirectory string
|
||||
kineImage string
|
||||
datastore string
|
||||
managerNamespace string
|
||||
managerServiceAccountName string
|
||||
managerServiceName string
|
||||
webhookCABundle []byte
|
||||
migrateJobImage string
|
||||
maxConcurrentReconciles int
|
||||
metricsBindAddress string
|
||||
healthProbeBindAddress string
|
||||
leaderElect bool
|
||||
tmpDirectory string
|
||||
kineImage string
|
||||
controllerReconcileTimeout time.Duration
|
||||
cacheResyncPeriod time.Duration
|
||||
datastore string
|
||||
managerNamespace string
|
||||
managerServiceAccountName string
|
||||
managerServiceName string
|
||||
webhookCABundle []byte
|
||||
migrateJobImage string
|
||||
maxConcurrentReconciles int
|
||||
disableTelemetry bool
|
||||
certificateExpirationDeadline time.Duration
|
||||
|
||||
webhookCAPath string
|
||||
)
|
||||
|
||||
ctx := ctrl.SetupSignalHandler()
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "manager",
|
||||
Short: "Start the Kamaji Kubernetes Operator",
|
||||
SilenceErrors: false,
|
||||
SilenceUsage: true,
|
||||
PreRunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
PreRunE: func(cmd *cobra.Command, _ []string) (err error) {
|
||||
// Avoid to pollute Kamaji stdout with useless details by the underlying klog implementations
|
||||
klog.SetOutput(io.Discard)
|
||||
klog.LogToStderr(false)
|
||||
|
||||
if err = cmdutils.CheckFlags(cmd.Flags(), []string{"kine-image", "datastore", "migrate-image", "tmp-directory", "pod-namespace", "webhook-service-name", "serviceaccount-name", "webhook-ca-path"}...); err != nil {
|
||||
if err = cmdutils.CheckFlags(cmd.Flags(), []string{"kine-image", "migrate-image", "tmp-directory", "pod-namespace", "webhook-service-name", "serviceaccount-name", "webhook-ca-path"}...); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if certificateExpirationDeadline < 24*time.Hour {
|
||||
return fmt.Errorf("certificate expiration deadline must be at least 24 hours")
|
||||
}
|
||||
|
||||
if webhookCABundle, err = os.ReadFile(webhookCAPath); err != nil {
|
||||
return fmt.Errorf("unable to read webhook CA: %w", err)
|
||||
}
|
||||
|
||||
if err = datastoreutils.CheckExists(ctx, scheme, datastore); err != nil {
|
||||
if err = datastoreutils.CheckExists(context.Background(), scheme, datastore); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if controllerReconcileTimeout.Seconds() == 0 {
|
||||
return fmt.Errorf("the controller reconcile timeout must be greater than zero")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(*cobra.Command, []string) error {
|
||||
ctx := ctrl.SetupSignalHandler()
|
||||
|
||||
setupLog := ctrl.Log.WithName("setup")
|
||||
|
||||
setupLog.Info(fmt.Sprintf("Kamaji version %s %s%s", internal.GitTag, internal.GitCommit, internal.GitDirty))
|
||||
@@ -80,54 +107,108 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
|
||||
setupLog.Info(fmt.Sprintf("Build date: %s", internal.BuildTime))
|
||||
setupLog.Info(fmt.Sprintf("Go Version: %s", goRuntime.Version()))
|
||||
setupLog.Info(fmt.Sprintf("Go OS/Arch: %s/%s", goRuntime.GOOS, goRuntime.GOARCH))
|
||||
setupLog.Info(fmt.Sprintf("Telemetry enabled: %t", !disableTelemetry))
|
||||
|
||||
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
|
||||
Scheme: scheme,
|
||||
MetricsBindAddress: metricsBindAddress,
|
||||
Port: 9443,
|
||||
telemetryClient := telemetryclient.New(http.Client{Timeout: 5 * time.Second}, "https://telemetry.clastix.io")
|
||||
if disableTelemetry {
|
||||
telemetryClient = telemetryclient.NewNewOp()
|
||||
}
|
||||
|
||||
ctrlOpts := ctrl.Options{
|
||||
Scheme: scheme,
|
||||
Metrics: metricsserver.Options{
|
||||
BindAddress: metricsBindAddress,
|
||||
},
|
||||
WebhookServer: ctrlwebhook.NewServer(ctrlwebhook.Options{
|
||||
Port: 9443,
|
||||
}),
|
||||
HealthProbeBindAddress: healthProbeBindAddress,
|
||||
LeaderElection: leaderElect,
|
||||
LeaderElectionNamespace: managerNamespace,
|
||||
LeaderElectionID: "799b98bc.clastix.io",
|
||||
})
|
||||
LeaderElectionID: "kamaji.clastix.io",
|
||||
NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) {
|
||||
opts.SyncPeriod = &cacheResyncPeriod
|
||||
|
||||
return cache.New(config, opts)
|
||||
},
|
||||
}
|
||||
|
||||
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrlOpts)
|
||||
if err != nil {
|
||||
setupLog.Error(err, "unable to start manager")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
tcpChannel := make(controllers.TenantControlPlaneChannel)
|
||||
tcpChannel, certChannel := make(chan event.GenericEvent), make(chan event.GenericEvent)
|
||||
|
||||
if err = (&controllers.DataStore{TenantControlPlaneTrigger: tcpChannel}).SetupWithManager(mgr); err != nil {
|
||||
if err = (&controllers.DataStore{Client: mgr.GetClient(), TenantControlPlaneTrigger: tcpChannel}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "DataStore")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
discoveryClient, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
|
||||
if err != nil {
|
||||
setupLog.Error(err, "unable to create discovery client")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
reconciler := &controllers.TenantControlPlaneReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
APIReader: mgr.GetAPIReader(),
|
||||
Config: controllers.TenantControlPlaneReconcilerConfig{
|
||||
DefaultDataStoreName: datastore,
|
||||
KineContainerImage: kineImage,
|
||||
TmpBaseDirectory: tmpDirectory,
|
||||
DefaultDataStoreName: datastore,
|
||||
KineContainerImage: kineImage,
|
||||
TmpBaseDirectory: tmpDirectory,
|
||||
CertExpirationThreshold: certificateExpirationDeadline,
|
||||
},
|
||||
ReconcileTimeout: controllerReconcileTimeout,
|
||||
CertificateChan: certChannel,
|
||||
TriggerChan: tcpChannel,
|
||||
KamajiNamespace: managerNamespace,
|
||||
KamajiServiceAccount: managerServiceAccountName,
|
||||
KamajiService: managerServiceName,
|
||||
KamajiMigrateImage: migrateJobImage,
|
||||
MaxConcurrentReconciles: maxConcurrentReconciles,
|
||||
DiscoveryClient: discoveryClient,
|
||||
}
|
||||
|
||||
if err = reconciler.SetupWithManager(mgr); err != nil {
|
||||
if err = reconciler.SetupWithManager(ctx, mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "Namespace")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
if err = (&webhook.Freeze{}).SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to register webhook", "webhook", "Freeze")
|
||||
k8sVersion, versionErr := cmdutils.KubernetesVersion(mgr.GetConfig())
|
||||
if versionErr != nil {
|
||||
setupLog.Error(err, "unable to get kubernetes version")
|
||||
|
||||
k8sVersion = "Unknown"
|
||||
}
|
||||
|
||||
if !disableTelemetry {
|
||||
err = mgr.Add(&controllers.TelemetryController{
|
||||
Client: mgr.GetClient(),
|
||||
KubernetesVersion: k8sVersion,
|
||||
KamajiVersion: internal.GitTag,
|
||||
TelemetryClient: telemetryClient,
|
||||
LeaderElectionNamespace: ctrlOpts.LeaderElectionNamespace,
|
||||
LeaderElectionID: ctrlOpts.LeaderElectionID,
|
||||
})
|
||||
if err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "TelemetryController")
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
certController := &controllers.CertificateLifecycle{Channel: certChannel, Deadline: certificateExpirationDeadline}
|
||||
certController.EnqueueFn = certController.EnqueueForTenantControlPlane
|
||||
|
||||
if err = certController.SetupWithManager(mgr); err != nil {
|
||||
setupLog.Error(err, "unable to create controller", "controller", "CertificateLifecycle")
|
||||
|
||||
return err
|
||||
}
|
||||
@@ -144,13 +225,66 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
        return err
    }

    if err = (&kamajiv1alpha1.TenantControlPlane{}).SetupWebhookWithManager(mgr, datastore); err != nil {
        setupLog.Error(err, "unable to create webhook", "webhook", "TenantControlPlane")
    // Only requires to look for the core api group.
    if utilities.AreGatewayResourcesAvailable(ctx, mgr.GetClient(), discoveryClient) {
        if err = (&kamajiv1alpha1.GatewayListener{}).SetupWithManager(ctx, mgr); err != nil {
            setupLog.Error(err, "unable to create indexer", "indexer", "GatewayListener")

            return err
            return err
        }
    }
    if err = (&kamajiv1alpha1.DataStore{}).SetupWebhookWithManager(mgr); err != nil {
        setupLog.Error(err, "unable to create webhook", "webhook", "DataStore")

    err = webhook.Register(mgr, map[routes.Route][]handlers.Handler{
        routes.TenantControlPlaneMigrate{}: {
            handlers.Freeze{},
        },
        routes.TenantControlPlaneWritePermission{}: {
            handlers.WritePermission{},
        },
        routes.TenantControlPlaneDefaults{}: {
            handlers.TenantControlPlaneDefaults{
                DefaultDatastore: datastore,
            },
        },
        routes.TenantControlPlaneValidate{}: {
            handlers.TenantControlPlaneCertSANs{},
            handlers.TenantControlPlaneName{},
            handlers.TenantControlPlaneVersion{},
            handlers.TenantControlPlaneDataStore{Client: mgr.GetClient()},
            handlers.TenantControlPlaneDeployment{
                Client: mgr.GetClient(),
                DeploymentBuilder: controlplane.Deployment{
                    Client:             mgr.GetClient(),
                    KineContainerImage: kineImage,
                },
                KonnectivityBuilder: controlplane.Konnectivity{
                    Scheme: *mgr.GetScheme(),
                },
            },
            handlers.TenantControlPlaneServiceCIDR{},
            handlers.TenantControlPlaneLoadBalancerSourceRanges{},
            handlers.TenantControlPlaneGatewayValidation{
                Client:          mgr.GetClient(),
                DiscoveryClient: discoveryClient,
            },
        },
        routes.TenantControlPlaneTelemetry{}: {
            handlers.TenantControlPlaneTelemetry{
                Enabled:           !disableTelemetry,
                TelemetryClient:   telemetryClient,
                KamajiVersion:     internal.GitTag,
                KubernetesVersion: k8sVersion,
            },
        },
        routes.DataStoreValidate{}: {
            handlers.DataStoreValidation{Client: mgr.GetClient()},
        },
        routes.DataStoreSecrets{}: {
            handlers.DataStoreSecretValidation{Client: mgr.GetClient()},
        },
    })
    if err != nil {
        setupLog.Error(err, "unable to create webhook")

        return err
    }
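The webhook.Register call in this hunk is a Kamaji helper that maps each admission route to a list of handlers. Underneath, each route ultimately resolves to an admission webhook served by the Manager's webhook server; a minimal sketch of that underlying controller-runtime pattern, with a hypothetical denyAll handler and path (not one of Kamaji's actual routes):

    package main

    import (
        "context"

        ctrl "sigs.k8s.io/controller-runtime"
        "sigs.k8s.io/controller-runtime/pkg/webhook"
        "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
    )

    // denyAll is a hypothetical admission handler, standing in for one of the
    // per-route handlers (defaults, validation, telemetry) listed in the hunk above.
    type denyAll struct{}

    func (denyAll) Handle(_ context.Context, req admission.Request) admission.Response {
        return admission.Denied("example handler: request for " + req.Name + " rejected")
    }

    func main() {
        mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
        if err != nil {
            panic(err)
        }
        // Each path maps to an admission handler served by the Manager's
        // webhook server; Kamaji's Register helper builds this mapping from
        // its routes to its handlers.
        mgr.GetWebhookServer().Register("/validate-example", &webhook.Admission{Handler: denyAll{}})
        if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
            panic(err)
        }
    }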
@@ -187,6 +321,7 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
            return nil
        },
    }

    // Setting zap logger
    zapfs := flag.NewFlagSet("zap", flag.ExitOnError)
    opts := zap.Options{
@@ -200,14 +335,18 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
    cmd.Flags().StringVar(&healthProbeBindAddress, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
    cmd.Flags().BoolVar(&leaderElect, "leader-elect", true, "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
    cmd.Flags().StringVar(&tmpDirectory, "tmp-directory", "/tmp/kamaji", "Directory which will be used to work with temporary files.")
    cmd.Flags().StringVar(&kineImage, "kine-image", "rancher/kine:v0.9.2-amd64", "Container image along with tag to use for the Kine sidecar container (used only if etcd-storage-type is set to one of kine strategies).")
    cmd.Flags().StringVar(&datastore, "datastore", "etcd", "The default DataStore that should be used by Kamaji to setup the required storage.")
    cmd.Flags().StringVar(&migrateJobImage, "migrate-image", fmt.Sprintf("clastix/kamaji:v%s", internal.GitTag), "Specify the container image to launch when a TenantControlPlane is migrated to a new datastore.")
    cmd.Flags().StringVar(&kineImage, "kine-image", "rancher/kine:v0.11.10-amd64", "Container image along with tag to use for the Kine sidecar container (used only if etcd-storage-type is set to one of kine strategies).")
    cmd.Flags().StringVar(&datastore, "datastore", "", "Optional, the default DataStore that should be used by Kamaji to setup the required storage of Tenant Control Planes with undeclared DataStore.")
    cmd.Flags().StringVar(&migrateJobImage, "migrate-image", fmt.Sprintf("%s/clastix/kamaji:%s", internal.ContainerRepository, internal.GitTag), "Specify the container image to launch when a TenantControlPlane is migrated to a new datastore.")
    cmd.Flags().IntVar(&maxConcurrentReconciles, "max-concurrent-tcp-reconciles", 1, "Specify the number of workers for the Tenant Control Plane controller (beware of CPU consumption)")
    cmd.Flags().StringVar(&managerNamespace, "pod-namespace", os.Getenv("POD_NAMESPACE"), "The Kubernetes Namespace on which the Operator is running in, required for the TenantControlPlane migration jobs.")
    cmd.Flags().StringVar(&managerServiceName, "webhook-service-name", "kamaji-webhook-service", "The Kamaji webhook server Service name which is used to get validation webhooks, required for the TenantControlPlane migration jobs.")
    cmd.Flags().StringVar(&managerServiceAccountName, "serviceaccount-name", os.Getenv("SERVICE_ACCOUNT"), "The Kubernetes Namespace on which the Operator is running in, required for the TenantControlPlane migration jobs.")
    cmd.Flags().StringVar(&webhookCAPath, "webhook-ca-path", "/tmp/k8s-webhook-server/serving-certs/ca.crt", "Path to the Manager webhook server CA, required for the TenantControlPlane migration jobs.")
    cmd.Flags().DurationVar(&controllerReconcileTimeout, "controller-reconcile-timeout", 30*time.Second, "The reconciliation request timeout before the controller withdraw the external resource calls, such as dealing with the Datastore, or the Tenant Control Plane API endpoint.")
    cmd.Flags().DurationVar(&cacheResyncPeriod, "cache-resync-period", 10*time.Hour, "The controller-runtime.Manager cache resync period.")
    cmd.Flags().BoolVar(&disableTelemetry, "disable-telemetry", false, "Disable the analytics traces collection.")
    cmd.Flags().DurationVar(&certificateExpirationDeadline, "certificate-expiration-deadline", 24*time.Hour, "Define the deadline upon certificate expiration to start the renewal process, cannot be less than a 24 hours.")

    cobra.OnInitialize(func() {
        viper.AutomaticEnv()
@@ -22,16 +22,17 @@ import (
func NewCmd(scheme *runtime.Scheme) *cobra.Command {
    // CLI flags
    var (
        tenantControlPlane string
        targetDataStore    string
        timeout            time.Duration
        tenantControlPlane    string
        targetDataStore       string
        cleanupPriorMigration bool
        timeout               time.Duration
    )

    cmd := &cobra.Command{
        Use:          "migrate",
        Short:        "Migrate the data of a TenantControlPlane to another compatible DataStore",
        SilenceUsage: true,
        RunE: func(cmd *cobra.Command, args []string) error {
        RunE: func(*cobra.Command, []string) error {
            ctx, cancelFn := context.WithTimeout(context.Background(), timeout)
            defer cancelFn()
@@ -95,6 +96,20 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
                return err
            }
            defer targetConnection.Close()

            if cleanupPriorMigration {
                log.Info("Checking if target DataStore should be clean-up prior migration")

                if exists, _ := targetConnection.DBExists(ctx, tcp.Status.Storage.Setup.Schema); exists {
                    log.Info("A colliding schema on target DataStore is present, cleaning up")

                    if dErr := targetConnection.DeleteDB(ctx, tcp.Status.Storage.Setup.Schema); dErr != nil {
                        return fmt.Errorf("error cleaning up prior migration: %s", dErr.Error())
                    }

                    log.Info("Cleaning up prior migration has been completed")
                }
            }
            // Start migrating from the old Datastore to the new one
            log.Info("migration from origin to target started")
@@ -110,6 +125,7 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {

    cmd.Flags().StringVar(&tenantControlPlane, "tenant-control-plane", "", "Namespaced-name of the TenantControlPlane that must be migrated (e.g.: default/test)")
    cmd.Flags().StringVar(&targetDataStore, "target-datastore", "", "Name of the Datastore to which the TenantControlPlane will be migrated")
    cmd.Flags().BoolVar(&cleanupPriorMigration, "cleanup-prior-migration", false, "When set to true, migration job will drop existing data in the target DataStore: useful to avoid stale data when migrating back and forth between DataStores.")
    cmd.Flags().DurationVar(&timeout, "timeout", 5*time.Minute, "Amount of time for the context timeout")

    _ = cmd.MarkFlagRequired("tenant-control-plane")
cmd/root.go (16 changed lines)
@@ -4,14 +4,14 @@
package cmd

import (
    "math/rand"
    "time"

    "github.com/spf13/cobra"
    _ "go.uber.org/automaxprocs" // Automatically set `GOMAXPROCS` to match Linux container CPU quota.
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    appsv1 "k8s.io/kubernetes/pkg/apis/apps/v1"
    gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
    gatewayv1alpha2 "sigs.k8s.io/gateway-api/apis/v1alpha2"

    kamajiv1alpha1 "github.com/clastix/kamaji/api/v1alpha1"
)
@@ -20,12 +20,14 @@ func NewCmd(scheme *runtime.Scheme) *cobra.Command {
    return &cobra.Command{
        Use:   "kamaji",
        Short: "Build and operate Kubernetes at scale with a fraction of operational burden.",
        PersistentPreRun: func(cmd *cobra.Command, args []string) {
            // Seed is required to ensure non reproducibility for the certificates generate by Kamaji.
            rand.Seed(time.Now().UnixNano())

        PersistentPreRun: func(*cobra.Command, []string) {
            utilruntime.Must(clientgoscheme.AddToScheme(scheme))
            utilruntime.Must(kamajiv1alpha1.AddToScheme(scheme))
            utilruntime.Must(appsv1.RegisterDefaults(scheme))
            // NOTE: This will succeed even if Gateway API is not installed in the cluster.
            // Only registers the go types.
            utilruntime.Must(gatewayv1.Install(scheme))
            utilruntime.Must(gatewayv1alpha2.Install(scheme))
        },
    }
}
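On the scheme wiring in this last hunk: installing the Gateway API types only populates the in-process type registry, so it succeeds regardless of whether the Gateway API CRDs are present in the cluster; availability is checked separately at startup (the AreGatewayResourcesAvailable call earlier in this comparison). A minimal, self-contained sketch of the same registration pattern:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime"
        utilruntime "k8s.io/apimachinery/pkg/util/runtime"
        clientgoscheme "k8s.io/client-go/kubernetes/scheme"
        gatewayv1 "sigs.k8s.io/gateway-api/apis/v1"
    )

    func main() {
        scheme := runtime.NewScheme()

        // Registration only touches the client-side scheme: it succeeds even
        // when the Gateway API CRDs are absent from the target cluster.
        utilruntime.Must(clientgoscheme.AddToScheme(scheme))
        utilruntime.Must(gatewayv1.Install(scheme))

        fmt.Println(scheme.IsGroupRegistered("gateway.networking.k8s.io"))
    }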