Mirror of https://github.com/stakater/Reloader.git (synced 2026-02-14 18:09:50 +00:00)
Compare commits
659 Commits
Commits 93fb083788 (newest) through 3b94615934 (oldest); the comparison table lists commit SHA1s only.
.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
---
name: Bug report
about: Create a report to help us improve
title: "[BUG]"
labels: kind/bug
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Environment**
- Operator Version:
- Kubernetes/OpenShift Version:

**Additional context**
Add any other context about the problem here.
.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea/enhancement for this project
title: "[ENHANCE]"
labels: kind/enhancement
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
.github/md_config.json (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
{
    "ignorePatterns": [
        {
            "pattern": "^(?!http).+"
        }
    ]
}
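For context, the `ignorePatterns` entry uses a negative lookahead: the markdown link checker skips every link target that does not start with `http` (relative paths, anchors) and only validates absolute URLs. A minimal sketch of what the pattern matches, assuming GNU grep with PCRE support (the sample targets are illustrative):

```sh
# Targets matched by "^(?!http).+" are the ones the link checker ignores.
printf '%s\n' 'https://stakater.com' 'docs/setup.md' '#usage' \
  | grep -P '^(?!http).+'
# prints: docs/setup.md and #usage, the non-http targets that get skipped
```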
.github/workflows/init-branch-release.yaml (vendored, new file, 68 lines)
@@ -0,0 +1,68 @@
name: Init Release
on:
  workflow_dispatch:
    inputs:
      TARGET_BRANCH:
        description: 'TARGET_BRANCH on which release will be based'
        required: true
        type: string

      TARGET_VERSION:
        description: 'TARGET_VERSION to build kubernetes manifests with using Kustomize'
        required: true
        type: string

permissions: {}

jobs:
  prepare-release:
    permissions:
      contents: write # for peter-evans/create-pull-request to create branch
      pull-requests: write # for peter-evans/create-pull-request to create a PR
    name: Automatically generate version and manifests on ${{ inputs.TARGET_BRANCH }}
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4.2.2
        with:
          fetch-depth: 0
          token: ${{ secrets.GITHUB_TOKEN }}
          ref: ${{ inputs.TARGET_BRANCH }}

      - name: Check if TARGET_VERSION is well formed.
        run: |
          set -xue
          # Target version must not contain 'v' prefix
          if echo "${{ inputs.TARGET_VERSION }}" | grep -e '^v'; then
            echo "::error::Target version '${{ inputs.TARGET_VERSION }}' should not begin with a 'v' prefix, refusing to continue." >&2
            exit 1
          fi

      - name: Create VERSION information
        run: |
          set -ue
          echo "Bumping version from $(cat VERSION) to ${{ inputs.TARGET_VERSION }}"
          echo "${{ inputs.TARGET_VERSION }}" > VERSION

      - name: Replace latest tag with version from input
        run: |
          set -ue
          VERSION=${{ inputs.TARGET_VERSION }} make update-manifests-version
          git diff

      - name: Generate new set of manifests
        run: |
          set -ue
          make k8s-manifests
          git diff

      - name: Create pull request
        uses: peter-evans/create-pull-request@v7.0.5
        with:
          commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
          title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"
          body: Updating VERSION and manifests to ${{ inputs.TARGET_VERSION }}
          branch: update-version
          branch-suffix: random
          signoff: true
          labels: release
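Since this workflow only triggers on `workflow_dispatch`, it must be started manually with its two required inputs. A sketch of kicking it off from the GitHub CLI (the branch and version values here are illustrative):

```sh
# Trigger the release-prep workflow with its two required inputs.
gh workflow run init-branch-release.yaml \
  -f TARGET_BRANCH=v1.0 \
  -f TARGET_VERSION=1.0.70
```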
.github/workflows/pull_request.yaml (vendored, 208 lines)
@@ -1,45 +1,82 @@
 name: Pull Request

 on:
-  pull_request_target:
+  pull_request:
     branches:
       - master
       - 'v**'
     paths:
       - '**'
       - '!.markdownlint.yaml'
       - '!.vale.ini'
       - '!Dockerfile-docs'
       - '!docs-nginx.conf'
       - '!docs/**'
       - '!theme_common'
       - '!theme_override'

 env:
   DOCKER_FILE_PATH: Dockerfile
-  GOLANG_VERSION: 1.18.2
-  KUBERNETES_VERSION: "1.18.0"
-  KIND_VERSION: "0.10.0"
+  DOCKER_UBI_FILE_PATH: Dockerfile.ubi
+  KUBERNETES_VERSION: "1.30.0"
+  KIND_VERSION: "0.23.0"
+  REGISTRY: ghcr.io

 jobs:
+  qa:
+    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.98
+    with:
+      MD_CONFIG: .github/md_config.json
+      DOC_SRC: README.md
+      MD_LINT_CONFIG: .markdownlint.yaml
+
   build:
-    runs-on: ubuntu-latest
+    permissions:
+      contents: read
+
+    runs-on: ubuntu-latest
     name: Build
     if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')"
     steps:
       - name: Check out code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
         with:
           ref: ${{github.event.pull_request.head.sha}}
+          fetch-depth: 0

       # Setting up helm binary
       - name: Set up Helm
-        uses: azure/setup-helm@v1
+        uses: azure/setup-helm@v4
         with:
           version: v3.11.3

+      - name: Helm chart unit tests
+        uses: d3adb5/helm-unittest-action@v2
+        with:
+          charts: deployments/kubernetes/chart/reloader
+
       - name: Set up Go
-        id: go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: ${{ env.GOLANG_VERSION }}
+          go-version-file: 'go.mod'
+          check-latest: true
+          cache: true

+      # Get highest tag and remove any suffixes with '-'
+      - name: Get Highest tag
+        id: highest_tag
+        run: |
+          highest=$(git tag -l --sort -version:refname | head -n 1)
+          echo "tag=${highest%%-*}" >> $GITHUB_OUTPUT
+
       - name: Install Dependencies
         run: |
           make install

       - name: Run golangci-lint
-        uses: golangci/golangci-lint-action@v2.3.0
+        uses: golangci/golangci-lint-action@v5
         with:
-          version: v1.45.2
+          version: latest
           only-new-issues: false
           args: --timeout 10m

@@ -48,15 +85,11 @@ jobs:
           cd deployments/kubernetes/chart/reloader
           helm lint

-      - name: Link check
-        uses: gaurav-nelson/github-action-markdown-link-check@v1
-
       - name: Install kubectl
         run: |
           curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
           sudo install ./kubectl /usr/local/bin/ && rm kubectl
-          kubectl version --short --client
-          kubectl version --short --client | grep -q ${KUBERNETES_VERSION}
+          kubectl version --client=true

       - name: Install Kind
         run: |
@@ -69,73 +102,132 @@ jobs:
         run: |
           kind create cluster
           kubectl cluster-info

       - name: Test
         run: make test

-      - name: Generate Tag
+      - name: Generate Tags
         id: generate_tag
         run: |
           sha=${{ github.event.pull_request.head.sha }}
           tag="SNAPSHOT-PR-${{ github.event.pull_request.number }}-${sha:0:8}"
-          echo "##[set-output name=GIT_TAG;]$(echo ${tag})"
+          ubi_tag="SNAPSHOT-PR-${{ github.event.pull_request.number }}-UBI-${sha:0:8}"
+          echo "GIT_TAG=$(echo ${tag})" >> $GITHUB_OUTPUT
+          echo "GIT_UBI_TAG=$(echo ${ubi_tag})" >> $GITHUB_OUTPUT

       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v3

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v3

-      - name: Login to Registry
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
-          password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}
-
-      - name: Generate image repository path
+      - name: Generate image repository path for ghcr registry
         run: |
-          echo IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
+          echo GHCR_IMAGE_REPOSITORY=${{env.REGISTRY}}/$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV

-      - name: Build and Push Docker Image
-        uses: docker/build-push-action@v2
+      # To identify any broken changes in dockerfiles or dependencies
+      - name: Build Docker Image
+        uses: docker/build-push-action@v6
         with:
           context: .
           file: ${{ env.DOCKER_FILE_PATH }}
           pull: true
-          push: true
+          push: false
           build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
           cache-to: type=inline
           platforms: linux/amd64,linux/arm,linux/arm64
           tags: |
-            ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}
+            ${{ env.GHCR_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}
           labels: |
             org.opencontainers.image.source=${{ github.event.repository.clone_url }}
             org.opencontainers.image.created=${{ steps.prep.outputs.created }}
             org.opencontainers.image.revision=${{ github.sha }}

-      - name: Comment on PR
-        uses: mshick/add-pr-comment@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
-        with:
-          message: '@${{ github.actor }} Image is available for testing. `docker pull ${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}`'
-          allow-repeats: false
+      - name: Build Docker UBI Image
+        uses: docker/build-push-action@v6
+        with:
+          context: .
+          file: ${{ env.DOCKER_UBI_FILE_PATH }}
+          pull: true
+          push: false
+          build-args: |
+            BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
+            BUILDER_IMAGE=${{ env.GHCR_IMAGE_REPOSITORY }}:${{ steps.highest_tag.outputs.tag }}
+          cache-to: type=inline
+          platforms: linux/amd64,linux/arm64
+          tags: |
+            ${{ env.GHCR_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_UBI_TAG }}
+          labels: |
+            org.opencontainers.image.source=${{ github.event.repository.clone_url }}
+            org.opencontainers.image.created=${{ steps.prep.outputs.created }}
+            org.opencontainers.image.revision=${{ github.sha }}

-      - name: Notify Failure
-        if: failure()
-        uses: mshick/add-pr-comment@v1
-        env:
-          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
-        with:
-          message: '@${{ github.actor }} Yikes! You better fix it before anyone else finds out! [Build](https://github.com/${{ github.repository }}/commit/${{ github.event.pull_request.head.sha }}/checks) has Failed!'
-          allow-repeats: false
+      - name: Check if Helm validation needs to run
+        uses: dorny/paths-filter@v3
+        id: filter
+        with:
+          filters: |
+            chart:
+              - 'deployments/kubernetes/chart/reloader/**'

-      - name: Notify Slack
-        uses: 8398a7/action-slack@v3
-        if: always() # Pick up events even if the job fails or is canceled.
-        with:
-          status: ${{ job.status }}
-          fields: repo,author,action,eventName,ref,workflow
-        env:
-          GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
-          SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
+    outputs:
+      helm_chart_changed: ${{ steps.filter.outputs.chart }}
+
+  helm-validation:
+    permissions:
+      contents: read
+
+    runs-on: ubuntu-latest
+    name: Helm Chart Validation
+    needs:
+      - build
+
+    if: ${{ needs.build.outputs.helm_chart_changed }} == "true"
+    steps:
+
+      - name: Check out code
+        uses: actions/checkout@v4
+        with:
+          ref: ${{github.event.pull_request.head.sha}}
+          fetch-depth: 0
+
+      # Setting up helm binary
+      - name: Set up Helm
+        uses: azure/setup-helm@v4
+        with:
+          version: v3.11.3
+
+      - name: Helm chart unit tests
+        uses: d3adb5/helm-unittest-action@v2
+        with:
+          charts: deployments/kubernetes/chart/reloader
+
+      - name: Add Stakater Helm Repo
+        run: |
+          helm repo add stakater https://stakater.github.io/stakater-charts
+
+      - name: Get version for chart from helm repo
+        id: chart_eval
+        run: |
+          current_chart_version=$(helm search repo stakater/reloader | tail -n 1 | awk '{print $2}')
+          echo "CURRENT_CHART_VERSION=$(echo ${current_chart_version})" >> $GITHUB_OUTPUT
+
+      - name: Get Updated Chart version from Chart.yaml
+        uses: mikefarah/yq@master
+        id: new_chart_version
+        with:
+          cmd: yq e '.version' deployments/kubernetes/chart/reloader/Chart.yaml
+
+      - name: Check Version
+        uses: aleoyakas/check-semver-increased-action@v1
+        id: check-version
+        with:
+          current-version: ${{ steps.new_chart_version.outputs.result }}
+          previous-version: ${{ steps.chart_eval.outputs.CURRENT_CHART_VERSION }}
+
+      - name: Fail if Helm Chart version isnt updated
+        if: steps.check-version.outputs.is-version-increased != 'true'
+        run: |
+          echo "Helm Chart Version wasnt updated"
+          exit 1
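Two shell idioms in this diff are worth unpacking: the deprecated `##[set-output]` syntax is replaced by appending `key=value` to the file named by `$GITHUB_OUTPUT`, and `${highest%%-*}` strips everything from the first `-` onward so pre-release suffixes are dropped from the tag. A quick sketch of both (the tag value is made up):

```sh
# ${var%%-*} removes the longest suffix matching '-*',
# i.e. everything from the first '-' to the end of the string.
highest="v1.0.69-rc.1"
echo "${highest%%-*}"              # -> v1.0.69

# Steps now publish outputs by appending to $GITHUB_OUTPUT;
# simulate it locally with a throwaway file.
GITHUB_OUTPUT=$(mktemp)
echo "tag=${highest%%-*}" >> "$GITHUB_OUTPUT"
cat "$GITHUB_OUTPUT"               # -> tag=v1.0.69
```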
.github/workflows/pull_request_docs.yaml (vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
name: Pull Request for Documentation Changes

on:
  pull_request:
    branches:
      - master
    paths:
      - '.markdownlint.yaml'
      - '.vale.ini'
      - 'Dockerfile-docs'
      - 'docs-nginx.conf'
      - 'docs/**'
      - 'theme_common'
      - 'theme_override'

jobs:
  qa:
    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.98
    with:
      MD_CONFIG: .github/md_config.json
      DOC_SRC: docs
      MD_LINT_CONFIG: .markdownlint.yaml
.github/workflows/push-helm-chart.yaml (vendored, new file, 105 lines)
@@ -0,0 +1,105 @@
name: Push Helm Chart

on:
  pull_request:
    types:
      - closed
    branches:
      - master
    paths:
      - 'deployments/kubernetes/chart/reloader/**'

env:
  HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"
  REGISTRY: ghcr.io

jobs:
  build:

    permissions:
      contents: read
      packages: write # to push artifacts to `ghcr.io`

    name: Build
    if: github.event.pull_request.merged == true
    runs-on: ubuntu-latest

    steps:
      - name: Check out code
        uses: actions/checkout@v4
        with:
          token: ${{ secrets.PUBLISH_TOKEN }}
          fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
          submodules: recursive

      # Setting up helm binary
      - name: Set up Helm
        uses: azure/setup-helm@v4
        with:
          version: v3.11.3

      - name: Add Stakater Helm Repo
        run: |
          helm repo add stakater https://stakater.github.io/stakater-charts

      - name: Get version for chart from helm repo
        id: chart_eval
        run: |
          current_chart_version=$(helm search repo stakater/reloader | tail -n 1 | awk '{print $2}')
          echo "CURRENT_CHART_VERSION=$(echo ${current_chart_version})" >> $GITHUB_OUTPUT

      - name: Get Updated Chart version from Chart.yaml
        uses: mikefarah/yq@master
        id: new_chart_version
        with:
          cmd: yq e '.version' deployments/kubernetes/chart/reloader/Chart.yaml

      - name: Check Version
        uses: aleoyakas/check-semver-increased-action@v1
        id: check-version
        with:
          current-version: ${{ steps.new_chart_version.outputs.result }}
          previous-version: ${{ steps.chart_eval.outputs.CURRENT_CHART_VERSION }}

      - name: Fail if Helm Chart version isnt updated
        if: steps.check-version.outputs.is-version-increased != 'true'
        run: |
          echo "Helm Chart Version wasnt updated"
          exit 1

      - name: Login to GHCR Registry
        uses: docker/login-action@v2
        with:
          registry: ghcr.io/stakater
          username: ${{ secrets.GHCR_USERNAME }}
          password: ${{ secrets.GHCR_TOKEN }}

      - name: Publish Helm chart to ghcr.io
        run: |
          helm package ./deployments/kubernetes/chart/reloader --destination ./packaged-chart
          helm push ./packaged-chart/*.tgz oci://ghcr.io/stakater/charts
          rm -rf ./packaged-chart

      - name: Publish Helm chart to gh-pages
        uses: stefanprodan/helm-gh-pages@master
        with:
          branch: master
          repository: stakater-charts
          target_dir: docs
          token: ${{ secrets.PUBLISH_TOKEN }}
          charts_dir: deployments/kubernetes/chart/
          charts_url: ${{ env.HELM_REGISTRY_URL }}
          owner: stakater
          linting: on
          commit_username: stakater-user
          commit_email: stakater@gmail.com

      - name: Notify Slack
        uses: 8398a7/action-slack@v3
        if: always() # Pick up events even if the job fails or is canceled.
        with:
          status: ${{ job.status }}
          fields: repo,author,action,eventName,ref,workflow
        env:
          GITHUB_TOKEN: ${{ secrets.PUBLISH_TOKEN }}
          SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
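The version gate relies on `helm search repo` printing a table whose second column is the chart version, so `tail -n 1 | awk '{print $2}'` grabs the currently published version to compare against `Chart.yaml`. A sketch of the extraction (the printed version is illustrative):

```sh
# helm search repo output is tabular; column 2 is CHART VERSION.
helm repo add stakater https://stakater.github.io/stakater-charts
helm search repo stakater/reloader | tail -n 1 | awk '{print $2}'
# -> e.g. 1.0.69, the currently published chart version
```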
.github/workflows/push-pr-image.yaml (vendored, new file, 86 lines)
@@ -0,0 +1,86 @@
name: Push PR Image on Label

on:
  pull_request:
    branches:
      - master
    types: [ labeled ]

env:
  DOCKER_FILE_PATH: Dockerfile
  REGISTRY: ghcr.io

jobs:

  build-and-push-pr-image:
    permissions:
      contents: read

    runs-on: ubuntu-latest
    name: Build and Push PR Image
    if: ${{ github.event.label.name == 'build-and-push-pr-image' }}
    steps:
      - name: Check out code
        uses: actions/checkout@v4
        with:
          ref: ${{github.event.pull_request.head.sha}}
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version-file: 'go.mod'
          check-latest: true
          cache: true

      - name: Install Dependencies
        run: |
          make install

      - name: Run golangci-lint
        uses: golangci/golangci-lint-action@v5
        with:
          version: latest
          only-new-issues: false
          args: --timeout 10m

      - name: Generate Tags
        id: generate_tag
        run: |
          sha=${{ github.event.pull_request.head.sha }}
          tag="SNAPSHOT-PR-${{ github.event.pull_request.number }}-${sha:0:8}"
          echo "GIT_TAG=$(echo ${tag})" >> $GITHUB_OUTPUT

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Generate image repository path for ghcr registry
        run: |
          echo GHCR_IMAGE_REPOSITORY=${{env.REGISTRY}}/$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV

      - name: Login to ghcr registry
        uses: docker/login-action@v3
        with:
          registry: ${{env.REGISTRY}}
          username: stakater-user
          password: ${{secrets.GITHUB_TOKEN}}

      - name: Build Docker Image
        uses: docker/build-push-action@v6
        with:
          context: .
          file: ${{ env.DOCKER_FILE_PATH }}
          pull: true
          push: true
          build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
          cache-to: type=inline
          platforms: linux/amd64,linux/arm,linux/arm64
          tags: |
            ${{ env.GHCR_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.GIT_TAG }}
          labels: |
            org.opencontainers.image.source=${{ github.event.repository.clone_url }}
            org.opencontainers.image.created=${{ steps.prep.outputs.created }}
            org.opencontainers.image.revision=${{ github.sha }}
227
.github/workflows/push.yaml
vendored
227
.github/workflows/push.yaml
vendored
@@ -1,57 +1,69 @@
|
||||
name: Push
|
||||
|
||||
on:
|
||||
push:
|
||||
pull_request:
|
||||
types:
|
||||
- closed
|
||||
branches:
|
||||
- master
|
||||
- 'v**'
|
||||
|
||||
env:
|
||||
DOCKER_FILE_PATH: Dockerfile
|
||||
GOLANG_VERSION: 1.18.2
|
||||
KUBERNETES_VERSION: "1.18.0"
|
||||
KIND_VERSION: "0.10.0"
|
||||
DOCKER_UBI_FILE_PATH: Dockerfile.ubi
|
||||
KUBERNETES_VERSION: "1.30.0"
|
||||
KIND_VERSION: "0.23.0"
|
||||
HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"
|
||||
REGISTRY: ghcr.io
|
||||
|
||||
jobs:
|
||||
build:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write # to push artifacts to `ghcr.io`
|
||||
|
||||
name: Build
|
||||
if: "! contains(toJSON(github.event.commits.*.message), '[skip-ci]')"
|
||||
if: github.event.pull_request.merged == true
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal token
|
||||
token: ${{ secrets.PUBLISH_TOKEN }}
|
||||
fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
|
||||
submodules: recursive
|
||||
|
||||
# Setting up helm binary
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@v1
|
||||
uses: azure/setup-helm@v4
|
||||
with:
|
||||
version: v3.11.3
|
||||
|
||||
- name: Set up Go
|
||||
id: go
|
||||
uses: actions/setup-go@v2
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
cache: true
|
||||
|
||||
- name: Install Dependencies
|
||||
run: |
|
||||
make install
|
||||
|
||||
- name: Run golangci-lint
|
||||
uses: golangci/golangci-lint-action@v2.3.0
|
||||
uses: golangci/golangci-lint-action@v5
|
||||
with:
|
||||
version: v1.45.2
|
||||
version: latest
|
||||
only-new-issues: false
|
||||
args: --timeout 10m
|
||||
|
||||
|
||||
- name: Install kubectl
|
||||
run: |
|
||||
curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
|
||||
sudo install ./kubectl /usr/local/bin/ && rm kubectl
|
||||
kubectl version --short --client
|
||||
kubectl version --short --client | grep -q ${KUBERNETES_VERSION}
|
||||
kubectl version --client=true
|
||||
|
||||
- name: Install Kind
|
||||
run: |
|
||||
@@ -68,32 +80,24 @@ jobs:
|
||||
- name: Test
|
||||
run: make test
|
||||
|
||||
- name: Generate Tag
|
||||
id: generate_tag
|
||||
uses: anothrNick/github-tag-action@1.36.0
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
|
||||
WITH_V: true
|
||||
DEFAULT_BUMP: patch
|
||||
DRY_RUN: true
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v1
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login to Registry
|
||||
uses: docker/login-action@v1
|
||||
- name: Login to Docker Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Generate image repository path
|
||||
- name: Generate image repository path for Docker registry
|
||||
run: |
|
||||
echo IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v2
|
||||
echo DOCKER_IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
|
||||
|
||||
- name: Build and Push Docker Image to Docker registry
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: ${{ env.DOCKER_FILE_PATH }}
|
||||
@@ -103,73 +107,112 @@ jobs:
|
||||
cache-to: type=inline
|
||||
platforms: linux/amd64,linux/arm,linux/arm64
|
||||
tags: |
|
||||
${{ env.IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.new_tag }}
|
||||
${{ env.DOCKER_IMAGE_REPOSITORY }}:merge-${{ github.event.number }}
|
||||
labels: |
|
||||
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
|
||||
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
|
||||
##############################
|
||||
## Add steps to generate required artifacts for a release here(helm chart, operator manifest etc.)
|
||||
##############################
|
||||
|
||||
# Generate tag for operator without "v"
|
||||
- name: Generate Operator Tag
|
||||
id: generate_operator_tag
|
||||
uses: anothrNick/github-tag-action@1.36.0
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
|
||||
WITH_V: false
|
||||
DEFAULT_BUMP: patch
|
||||
DRY_RUN: true
|
||||
|
||||
# Update chart tag to the latest semver tag
|
||||
- name: Update Chart Version
|
||||
env:
|
||||
VERSION: ${{ steps.generate_operator_tag.outputs.new_tag }}
|
||||
run: make bump-chart
|
||||
|
||||
- name: Helm Template
|
||||
run: |
|
||||
helm template reloader deployments/kubernetes/chart/reloader/ > deployments/kubernetes/reloader.yaml
|
||||
helm template reloader deployments/kubernetes/chart/reloader/ --output-dir deployments/kubernetes/manifests/ && mv deployments/kubernetes/manifests/reloader/templates/* deployments/kubernetes/manifests/ && rm -r deployments/kubernetes/manifests/reloader
|
||||
|
||||
# Publish helm chart
|
||||
- name: Publish Helm chart
|
||||
uses: stefanprodan/helm-gh-pages@master
|
||||
- name: Build and Push Docker UBI Image to Docker registry
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
branch: master
|
||||
repository: stakater-charts
|
||||
target_dir: docs
|
||||
token: ${{ secrets.STAKATER_GITHUB_TOKEN }}
|
||||
charts_dir: deployments/kubernetes/chart/
|
||||
charts_url: ${{ env.HELM_REGISTRY_URL }}
|
||||
owner: stakater
|
||||
linting: on
|
||||
commit_username: stakater-user
|
||||
commit_email: stakater@gmail.com
|
||||
|
||||
# Commit back changes
|
||||
- name: Commit files
|
||||
run: |
|
||||
git config --local user.email "stakater@gmail.com"
|
||||
git config --local user.name "stakater-user"
|
||||
git status
|
||||
git add .
|
||||
git commit -m "[skip-ci] Update artifacts" -a
|
||||
|
||||
- name: Push changes
|
||||
uses: ad-m/github-push-action@master
|
||||
context: .
|
||||
file: ${{ env.DOCKER_UBI_FILE_PATH }}
|
||||
pull: true
|
||||
push: true
|
||||
build-args: |
|
||||
BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
|
||||
BUILDER_IMAGE=${{ env.DOCKER_IMAGE_REPOSITORY }}:merge-${{ github.event.number }}
|
||||
cache-to: type=inline
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: |
|
||||
${{ env.DOCKER_IMAGE_REPOSITORY }}:merge-${{ github.event.number }}-ubi
|
||||
labels: |
|
||||
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
|
||||
- name: Login to ghcr registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
github_token: ${{ secrets.STAKATER_GITHUB_TOKEN }}
|
||||
branch: ${{ github.ref }}
|
||||
registry: ${{env.REGISTRY}}
|
||||
username: stakater-user
|
||||
password: ${{secrets.GITHUB_TOKEN}}
|
||||
|
||||
- name: Generate image repository path for ghcr registry
|
||||
run: |
|
||||
echo GHCR_IMAGE_REPOSITORY=${{env.REGISTRY}}/$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
|
||||
|
||||
- name: Build and Push Docker Image to ghcr registry
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: ${{ env.DOCKER_FILE_PATH }}
|
||||
pull: true
|
||||
push: true
|
||||
build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
|
||||
cache-to: type=inline
|
||||
platforms: linux/amd64,linux/arm,linux/arm64
|
||||
tags: |
|
||||
${{ env.GHCR_IMAGE_REPOSITORY }}:merge-${{ github.event.number }}
|
||||
labels: |
|
||||
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
|
||||
- name: Build and Push Docker UBI Image to ghcr registry
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: ${{ env.DOCKER_UBI_FILE_PATH }}
|
||||
pull: true
|
||||
push: true
|
||||
build-args: |
|
||||
BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
|
||||
BUILDER_IMAGE=${{ env.GHCR_IMAGE_REPOSITORY }}:merge-${{ github.event.number }}
|
||||
cache-to: type=inline
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: |
|
||||
${{ env.GHCR_IMAGE_REPOSITORY }}:merge-${{ github.event.number }}-ubi
|
||||
labels: |
|
||||
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
|
||||
- uses: dorny/paths-filter@v3
|
||||
id: filter
|
||||
with:
|
||||
filters: |
|
||||
docs:
|
||||
- '.markdownlint.yaml'
|
||||
- '.vale.ini'
|
||||
- 'Dockerfile-docs'
|
||||
- 'docs-nginx.conf'
|
||||
- 'docs/**'
|
||||
- 'README.md'
|
||||
- 'theme_common'
|
||||
- 'theme_override'
|
||||
|
||||
# run only if 'docs' files were changed
|
||||
- name: Build and Push Docker Image for Docs to ghcr registry
|
||||
if: steps.filter.outputs.docs == 'true'
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile-docs
|
||||
pull: true
|
||||
push: true
|
||||
build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
|
||||
cache-to: type=inline
|
||||
platforms: linux/amd64,linux/arm,linux/arm64
|
||||
tags: |
|
||||
${{ env.GHCR_IMAGE_REPOSITORY }}/docs:merge-${{ github.event.number }}
|
||||
labels: |
|
||||
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
|
||||
- name: Push Latest Tag
|
||||
uses: anothrNick/github-tag-action@1.36.0
|
||||
uses: anothrNick/github-tag-action@1.71.0
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
|
||||
WITH_V: true
|
||||
DEFAULT_BUMP: patch
|
||||
GITHUB_TOKEN: ${{ secrets.PUBLISH_TOKEN }}
|
||||
WITH_V: false
|
||||
CUSTOM_TAG: merge-${{ github.event.number }}
|
||||
|
||||
- name: Notify Slack
|
||||
uses: 8398a7/action-slack@v3
|
||||
@@ -178,5 +221,5 @@ jobs:
|
||||
status: ${{ job.status }}
|
||||
fields: repo,author,action,eventName,ref,workflow
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
|
||||
GITHUB_TOKEN: ${{ secrets.PUBLISH_TOKEN }}
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
|
||||
|
||||
214
.github/workflows/release.yaml
vendored
214
.github/workflows/release.yaml
vendored
@@ -1,44 +1,228 @@
|
||||
name: Release Go project
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v*"
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
env:
|
||||
GOLANG_VERSION: 1.18.2
|
||||
DOCKER_FILE_PATH: Dockerfile
|
||||
DOCKER_UBI_FILE_PATH: Dockerfile.ubi
|
||||
KUBERNETES_VERSION: "1.30.0"
|
||||
KIND_VERSION: "0.23.0"
|
||||
REGISTRY: ghcr.io
|
||||
|
||||
jobs:
|
||||
build:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write # to push artifacts to `ghcr.io`
|
||||
|
||||
name: GoReleaser build
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0 # See: https://goreleaser.com/ci/actions/
|
||||
token: ${{ secrets.PUBLISH_TOKEN }}
|
||||
fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
|
||||
submodules: recursive
|
||||
|
||||
- name: Set up Go 1.x
|
||||
uses: actions/setup-go@v2
|
||||
# Setting up helm binary
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@v4
|
||||
with:
|
||||
go-version: ${{ env.GOLANG_VERSION }}
|
||||
id: go
|
||||
version: v3.11.3
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version-file: 'go.mod'
|
||||
check-latest: true
|
||||
cache: true
|
||||
|
||||
- name: Install Dependencies
|
||||
run: |
|
||||
make install
|
||||
|
||||
- name: Run golangci-lint
|
||||
uses: golangci/golangci-lint-action@v5
|
||||
with:
|
||||
version: latest
|
||||
only-new-issues: false
|
||||
args: --timeout 10m
|
||||
|
||||
- name: Install kubectl
|
||||
run: |
|
||||
curl -LO "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
|
||||
sudo install ./kubectl /usr/local/bin/ && rm kubectl
|
||||
kubectl version --client=true
|
||||
|
||||
- name: Install Kind
|
||||
run: |
|
||||
curl -L -o kind https://github.com/kubernetes-sigs/kind/releases/download/v${KIND_VERSION}/kind-linux-amd64
|
||||
sudo install ./kind /usr/local/bin && rm kind
|
||||
kind version
|
||||
kind version | grep -q ${KIND_VERSION}
|
||||
|
||||
- name: Create Kind Cluster
|
||||
run: |
|
||||
kind create cluster
|
||||
kubectl cluster-info
|
||||
|
||||
- name: Test
|
||||
run: make test
|
||||
|
||||
- name: Get Tag from Github Ref
|
||||
id: generate_tag
|
||||
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login to Docker Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Generate image repository path for Docker registry
|
||||
run: |
|
||||
echo DOCKER_IMAGE_REPOSITORY=$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
|
||||
|
||||
- name: Build and Push Docker Image to Docker registry
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: ${{ env.DOCKER_FILE_PATH }}
|
||||
pull: true
|
||||
push: true
|
||||
cache-to: type=inline
|
||||
platforms: linux/amd64,linux/arm,linux/arm64
|
||||
tags: |
|
||||
${{ env.DOCKER_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.RELEASE_VERSION }}
|
||||
labels: |
|
||||
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
|
||||
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
|
||||
- name: Build and Push Docker UBI Image to Docker registry
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: ${{ env.DOCKER_UBI_FILE_PATH }}
|
||||
pull: true
|
||||
push: true
|
||||
build-args: |
|
||||
BUILDER_IMAGE=${{ env.DOCKER_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.RELEASE_VERSION }}
|
||||
cache-to: type=inline
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: |
|
||||
${{ env.DOCKER_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.RELEASE_VERSION }}-ubi
|
||||
labels: |
|
||||
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
|
||||
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
|
||||
- name: Login to ghcr registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ${{env.REGISTRY}}
|
||||
username: stakater-user
|
||||
password: ${{secrets.GITHUB_TOKEN}}
|
||||
|
||||
- name: Generate image repository path for ghcr registry
|
||||
run: |
|
||||
echo GHCR_IMAGE_REPOSITORY=${{env.REGISTRY}}/$(echo ${{ github.repository }} | tr '[:upper:]' '[:lower:]') >> $GITHUB_ENV
|
||||
|
||||
# tag this image as latest as it will be used in plain manifests
|
||||
- name: Build and Push Docker Image to ghcr registry
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: ${{ env.DOCKER_FILE_PATH }}
|
||||
pull: true
|
||||
push: true
|
||||
cache-to: type=inline
|
||||
platforms: linux/amd64,linux/arm,linux/arm64
|
||||
tags: |
|
||||
${{ env.GHCR_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.RELEASE_VERSION }},${{ env.GHCR_IMAGE_REPOSITORY }}:latest
|
||||
labels: |
|
||||
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
|
||||
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
|
||||
- name: Build and Push Docker UBI Image to ghcr registry
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: ${{ env.DOCKER_UBI_FILE_PATH }}
|
||||
pull: true
|
||||
push: true
|
||||
build-args: |
|
||||
BUILDER_IMAGE=${{ env.GHCR_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.RELEASE_VERSION }}
|
||||
cache-to: type=inline
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: |
|
||||
${{ env.GHCR_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.RELEASE_VERSION }}-ubi
|
||||
labels: |
|
||||
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
|
||||
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
|
||||
- name: Build and Push Docker Image for Docs to ghcr registry
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile-docs
|
||||
pull: true
|
||||
push: true
|
||||
cache-to: type=inline
|
||||
tags: |
|
||||
${{ env.GHCR_IMAGE_REPOSITORY }}/docs:${{ steps.generate_tag.outputs.RELEASE_VERSION }}
|
||||
labels: |
|
||||
org.opencontainers.image.source=${{ github.event.repository.clone_url }}
|
||||
org.opencontainers.image.created=${{ steps.prep.outputs.created }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
|
||||
##############################
|
||||
## Add steps to generate required artifacts for a release here(helm chart, operator manifest etc.)
|
||||
##############################
|
||||
|
||||
# # Generate tag for operator without "v"
|
||||
# - name: Generate Operator Tag
|
||||
# id: generate_operator_tag
|
||||
# uses: anothrNick/github-tag-action@1.70.0
|
||||
# env:
|
||||
# GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
|
||||
# WITH_V: false
|
||||
# DEFAULT_BUMP: patch
|
||||
# DRY_RUN: true
|
||||
|
||||
# # Update chart tag to the latest semver tag
|
||||
# - name: Update Chart Version
|
||||
# env:
|
||||
# VERSION: ${{ steps.generate_operator_tag.outputs.RELEASE_VERSION }}
|
||||
# run: make bump-chart
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@master
|
||||
with:
|
||||
version: latest
|
||||
args: release --rm-dist
|
||||
args: release --clean
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
|
||||
GITHUB_TOKEN: ${{ secrets.PUBLISH_TOKEN }}
|
||||
|
||||
- name: Notify Slack
|
||||
uses: 8398a7/action-slack@v3
|
||||
if: always()
|
||||
if: always() # Pick up events even if the job fails or is canceled.
|
||||
with:
|
||||
status: ${{ job.status }}
|
||||
fields: repo,author,action,eventName,ref,workflow
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.STAKATER_GITHUB_TOKEN }}
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
|
||||
GITHUB_TOKEN: ${{ secrets.PUBLISH_TOKEN }}
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.STAKATER_DELIVERY_SLACK_WEBHOOK }}
|
||||
|
||||
17
.github/workflows/reloader-enterprise-published.yml
vendored
Normal file
17
.github/workflows/reloader-enterprise-published.yml
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
name: Dispatch event for published release
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
jobs:
|
||||
dispatch:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Trigger target repository workflow
|
||||
run: |
|
||||
curl -X POST \
|
||||
-H "Accept: application/vnd.github.v3+json" \
|
||||
-H "Authorization: token ${{ secrets.STAKATER_AB_TOKEN_FOR_RLDR }}" \
|
||||
https://api.github.com/repos/stakater-ab/reloader-enterprise/dispatches \
|
||||
-d '{"event_type":"release-published"}'
|
||||
17
.github/workflows/reloader-enterprise-unpublished.yml
vendored
Normal file
17
.github/workflows/reloader-enterprise-unpublished.yml
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
name: Dispatch event for unpublished release
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [unpublished ]
|
||||
|
||||
jobs:
|
||||
dispatch:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Trigger target repository workflow
|
||||
run: |
|
||||
curl -X POST \
|
||||
-H "Accept: application/vnd.github.v3+json" \
|
||||
-H "Authorization: token ${{ secrets.STAKATER_AB_TOKEN_FOR_RLDR }}" \
|
||||
https://api.github.com/repos/stakater-ab/reloader-enterprise/dispatches \
|
||||
-d '{"event_type":"release-unpublished "}'
|
||||
9
.gitignore
vendored
9
.gitignore
vendored
@@ -9,4 +9,11 @@ _gopath/
|
||||
.vscode
|
||||
vendor
|
||||
dist
|
||||
Reloader
|
||||
Reloader
|
||||
!**/chart/reloader
|
||||
*.tgz
|
||||
styles/
|
||||
site/
|
||||
/mkdocs.yml
|
||||
yq
|
||||
bin
|
||||
3
.gitmodules
vendored
Normal file
3
.gitmodules
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
[submodule "theme_common"]
|
||||
path = theme_common
|
||||
url = https://github.com/stakater/stakater-docs-mkdocs-theme.git
|
||||
@@ -10,6 +10,7 @@ builds:
|
||||
- amd64
|
||||
- arm
|
||||
- arm64
|
||||
- ppc64le
|
||||
archives:
|
||||
- name_template: "{{ .ProjectName }}_v{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
|
||||
snapshot:
|
||||
|
||||
6
.markdownlint.yaml
Normal file
6
.markdownlint.yaml
Normal file
@@ -0,0 +1,6 @@
|
||||
{
|
||||
"MD007": { "indent": 4 },
|
||||
"MD013": false,
|
||||
"MD024": false,
|
||||
"MD029": { "style": one },
|
||||
}
|
||||
10
.vale.ini
Normal file
10
.vale.ini
Normal file
@@ -0,0 +1,10 @@
|
||||
StylesPath = styles
|
||||
MinAlertLevel = warning
|
||||
|
||||
Packages = https://github.com/stakater/vale-package/releases/download/v0.0.40/Stakater.zip
|
||||
Vocab = Stakater
|
||||
|
||||
# Only check MarkDown files
|
||||
[*.md]
|
||||
|
||||
BasedOnStyles = Vale
|
||||
3
CODE_OF_CONDUCT.md
Normal file
3
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Code of Conduct
|
||||
|
||||
Reloader follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
|
||||
@@ -2,7 +2,7 @@ ARG BUILDER_IMAGE
ARG BASE_IMAGE

# Build the manager binary
FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.18.2} as builder
FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.23.1} AS builder

ARG TARGETOS
ARG TARGETARCH

35 Dockerfile-docs Normal file
@@ -0,0 +1,35 @@
FROM python:3.12 as builder

# set workdir
RUN mkdir -p $HOME/application
WORKDIR $HOME/application

# copy the entire application
COPY --chown=1001:root . .

RUN pip3 install -r theme_common/requirements.txt

# Combine Theme Resources
RUN python theme_common/scripts/combine_theme_resources.py theme_common/resources theme_override/resources dist/_theme
# Produce mkdocs file
RUN python theme_common/scripts/combine_mkdocs_config_yaml.py theme_common/mkdocs.yml theme_override/mkdocs.yml mkdocs.yml

# build the docs
RUN mkdocs build

FROM nginxinc/nginx-unprivileged:1.27-alpine as deploy
COPY --from=builder $HOME/application/site/ /usr/share/nginx/html/reloader/
COPY docs-nginx.conf /etc/nginx/conf.d/default.conf

# set non-root user
USER 1001

LABEL name="Stakater Reloader Documentation" \
      maintainer="Stakater <hello@stakater.com>" \
      vendor="Stakater" \
      release="1" \
      summary="Documentation for Stakater Reloader"

EXPOSE 8080:8080/tcp

CMD ["nginx", "-g", "daemon off;"]
39 Dockerfile.ubi Normal file
@@ -0,0 +1,39 @@
ARG BUILDER_IMAGE
ARG BASE_IMAGE

FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE} AS SRC

FROM ${BASE_IMAGE:-registry.access.redhat.com/ubi9/ubi:latest} AS ubi
ARG TARGETARCH


RUN dnf update -y && dnf install -y binutils
# prep target rootfs for scratch container
WORKDIR /
RUN mkdir /image && \
    ln -s usr/bin /image/bin && \
    ln -s usr/sbin /image/sbin && \
    ln -s usr/lib64 /image/lib64 && \
    ln -s usr/lib /image/lib && \
    mkdir -p /image/{usr/bin,usr/lib64,usr/lib,root,home,proc,etc,sys,var,dev}

COPY ubi-build-files-${TARGETARCH}.txt /tmp
# Copy all the required files from the base UBI image into the image directory
# As the go binary is not statically compiled this includes everything needed for CGO to work, cacerts, tzdata and RH release files
RUN tar cf /tmp/files.tar -T /tmp/ubi-build-files-${TARGETARCH}.txt && tar xf /tmp/files.tar -C /image/

# Generate a rpm database which contains all the packages that you said were needed in ubi-build-files-*.txt
RUN rpm --root /image --initdb \
    && PACKAGES=$(rpm -qf $(cat /tmp/ubi-build-files-${TARGETARCH}.txt) | grep -v "is not owned by any package" | sort -u) \
    && echo dnf install -y 'dnf-command(download)' \
    && dnf download --destdir / ${PACKAGES} \
    && rpm --root /image -ivh --justdb --nodeps `for i in ${PACKAGES}; do echo $i.rpm; done`

FROM scratch
COPY --from=ubi /image/ /
COPY --from=SRC /manager .
USER 65532:65532
# Port for metrics and probes
EXPOSE 9090

ENTRYPOINT ["/manager"]
5 MAINTAINERS Normal file
@@ -0,0 +1,5 @@
Bharath Nallapeta <bharath.nallapeta@stakater.com> (@bnallapeta)
Karl Johan Grahn <karl.johan@stakater.com> (@karl-johan-grahn)
Muhammad Sheryar Butt <sheryar@stakater.com> (@SheryarButt)
Muneeb Aijaz <muneeb@stakater.com> (@MuneebAijaz)
Tanveer Alam <tanveer.alam@stakater.com> (@tanalam2411)
92 Makefile
@@ -9,7 +9,7 @@ ALL_ARCH ?= arm64 arm amd64
BUILDER_IMAGE ?=
BASE_IMAGE ?=
BINARY ?= Reloader
DOCKER_IMAGE ?= stakater/reloader
DOCKER_IMAGE ?= ghcr.io/stakater/reloader

# Default value "dev"
VERSION ?= 0.0.1
@@ -24,6 +24,75 @@ LDFLAGS =
GOPROXY ?=
GOPRIVATE ?=

## Location to install dependencies to
LOCALBIN ?= $(shell pwd)/bin
$(LOCALBIN):
	mkdir -p $(LOCALBIN)

## Tool Binaries
KUBECTL ?= kubectl
KUSTOMIZE ?= $(LOCALBIN)/kustomize-$(KUSTOMIZE_VERSION)
CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen-$(CONTROLLER_TOOLS_VERSION)
ENVTEST ?= $(LOCALBIN)/setup-envtest-$(ENVTEST_VERSION)
GOLANGCI_LINT = $(LOCALBIN)/golangci-lint-$(GOLANGCI_LINT_VERSION)
YQ ?= $(LOCALBIN)/yq

## Tool Versions
KUSTOMIZE_VERSION ?= v5.3.0
CONTROLLER_TOOLS_VERSION ?= v0.14.0
ENVTEST_VERSION ?= release-0.17
GOLANGCI_LINT_VERSION ?= v1.57.2

YQ_VERSION ?= v4.27.5
YQ_DOWNLOAD_URL = "https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_$(OS)_$(ARCH)"


.PHONY: yq
yq: $(YQ) ## Download YQ locally if needed
$(YQ):
	@test -d $(LOCALBIN) || mkdir -p $(LOCALBIN)
	@curl --retry 3 -fsSL $(YQ_DOWNLOAD_URL) -o $(YQ) || { \
		echo "Failed to download yq from $(YQ_DOWNLOAD_URL). Please check the URL and your network connection."; \
		exit 1; \
	}
	@chmod +x $(YQ)
	@echo "yq downloaded successfully to $(YQ)."


.PHONY: kustomize
kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
$(KUSTOMIZE): $(LOCALBIN)
	$(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION))

.PHONY: controller-gen
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
$(CONTROLLER_GEN): $(LOCALBIN)
	$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION))

.PHONY: envtest
envtest: $(ENVTEST) ## Download setup-envtest locally if necessary.
$(ENVTEST): $(LOCALBIN)
	$(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION))

.PHONY: golangci-lint
golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
$(GOLANGCI_LINT): $(LOCALBIN)
	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,${GOLANGCI_LINT_VERSION})

# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
# $1 - target path with name of binary (ideally with version)
# $2 - package url which can be installed
# $3 - specific version of package
define go-install-tool
@[ -f $(1) ] || { \
set -e; \
package=$(2)@$(3) ;\
echo "Downloading $${package}" ;\
GOBIN=$(LOCALBIN) go install $${package} ;\
mv "$$(echo "$(1)" | sed "s/-$(3)$$//")" $(1) ;\
}
endef

default: build test

install:
@@ -80,9 +149,28 @@ apply:

deploy: binary-image push apply

.PHONY: k8s-manifests
k8s-manifests: $(KUSTOMIZE) ## Generate k8s manifests using Kustomize from 'manifests' folder
	$(KUSTOMIZE) build ./deployments/kubernetes/ -o ./deployments/kubernetes/reloader.yaml

.PHONY: update-manifests-version
update-manifests-version: ## Update the image tag in the k8s deployment manifest
	sed -i 's/image:.*/image: \"ghcr.io\/stakater\/reloader:v$(VERSION)"/g' deployments/kubernetes/manifests/deployment.yaml

# Bump Chart
bump-chart:
	sed -i "s/^version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
	sed -i "s/^appVersion:.*/appVersion: v$(VERSION)/" deployments/kubernetes/chart/reloader/Chart.yaml
	sed -i "s/tag:.*/tag: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml
	sed -i "s/version:.*/version: v$(VERSION)/" deployments/kubernetes/chart/reloader/values.yaml

YQ_VERSION = v4.42.1
YQ_BIN = $(shell pwd)/yq
CURRENT_ARCH := $(shell uname -m | sed 's/x86_64/amd64/' | sed 's/aarch64/arm64/')

YQ_DOWNLOAD_URL = "https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_linux_$(CURRENT_ARCH)"

yq-install:
	@echo "Downloading yq $(YQ_VERSION) for linux/$(CURRENT_ARCH)"
	@curl -sL $(YQ_DOWNLOAD_URL) -o $(YQ_BIN)
	@chmod +x $(YQ_BIN)
	@echo "yq $(YQ_VERSION) installed at $(YQ_BIN)"

282 README.md
@@ -1,13 +1,13 @@
#  RELOADER
#  Reloader

[](https://goreportcard.com/report/github.com/stakater/reloader)
[](http://godoc.org/github.com/stakater/reloader)
[](https://godoc.org/github.com/stakater/reloader)
[](https://github.com/stakater/reloader/releases/latest)
[](https://github.com/stakater/reloader/releases/latest)
[](https://hub.docker.com/r/stakater/reloader/)
[](https://hub.docker.com/r/stakater/reloader/)
[](LICENSE)
[](http://stakater.com/?utm_source=Reloader&utm_medium=github)
[](https://stakater.com/?utm_source=Reloader&utm_medium=github)

## Problem

@@ -17,17 +17,32 @@ We would like to watch if some change happens in `ConfigMap` and/or `Secret`; th

Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `DeploymentConfigs`, `Deployments`, `Daemonsets`, `Statefulsets` and `Rollouts`.

## Enterprise Version

Reloader is available in two different versions:

1. Open Source Version
1. Enterprise Version, which includes:
    - SLA (Service Level Agreement) for support and unique requests
    - Slack support
    - Certified images

Contact [`sales@stakater.com`](mailto:sales@stakater.com) for info about Reloader Enterprise.

## Compatibility

Reloader is compatible with kubernetes >= 1.9
Reloader is compatible with Kubernetes >= 1.19

## How to use Reloader

For a `Deployment` called `foo` have a `ConfigMap` called `foo-configmap` or `Secret` called `foo-secret` or both. Then add your annotation (by default `reloader.stakater.com/auto`) to main metadata of your `Deployment`
You have a `Deployment` called `foo` and a `ConfigMap` and/or a `Secret` either mounted as a volume or defined as an environment variable. The `ConfigMap` and `Secret` can be named anything, but for the sake of this example, let's refer to the `ConfigMap` as `foo-configmap` and the `Secret` as `foo-secret`.

Add the annotation to the main metadata of your `Deployment`. By default this would be `reloader.stakater.com/auto`.

```yaml
kind: Deployment
metadata:
  name: foo
  annotations:
    reloader.stakater.com/auto: "true"
spec:
@@ -37,7 +52,9 @@ spec:

This will discover deploymentconfigs/deployments/daemonsets/statefulsets/rollouts automatically where `foo-configmap` or `foo-secret` is being used either via environment variable or from volume mount. And it will perform a rolling upgrade on related pods when `foo-configmap` or `foo-secret` are updated.

You can restrict this discovery to only `ConfigMap` or `Secret` objects that
You can filter it by the type of monitored resource and use typed versions of the `auto` annotation. If you want to discover changes only in mounted `Secret`s and ignore changes in `ConfigMap`s, add the `secret.reloader.stakater.com/auto` annotation instead. Analogously, you can use the `configmap.reloader.stakater.com/auto` annotation to look for changes in mounted `ConfigMap`s; changes in any of the mounted `Secret`s will then not trigger a rolling upgrade on related pods.
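
For example, a `Deployment` that should only be reloaded on `Secret` changes would carry the typed annotation (a minimal sketch; resource names are illustrative):

```yaml
kind: Deployment
metadata:
  name: foo
  annotations:
    # Only changes in mounted/referenced Secrets trigger a rolling upgrade
    secret.reloader.stakater.com/auto: "true"
spec:
  # ... rest of the Deployment spec
```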

You can also restrict this discovery to only `ConfigMap` or `Secret` objects that
are tagged with a special annotation. To take advantage of that, annotate
your deploymentconfigs/deployments/daemonsets/statefulsets/rollouts like this:

@@ -72,8 +89,14 @@ will always restart upon a change in configmaps or secrets it uses, regardless
of whether they have the `reloader.stakater.com/match: "true"` annotation or
not.

We can also specify a specific configmap or secret which would trigger rolling upgrade only upon change in our specified configmap or secret, this way, it will not trigger rolling upgrade upon changes in all configmaps or secrets used in a deploymentconfig, deployment, daemonset, statefulset or rollout.
To do this either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use annotations mentioned [here](#Configmap) or [here](#Secret)
Similarly, `reloader.stakater.com/auto` and its typed version (`secret.reloader.stakater.com/auto` or `configmap.reloader.stakater.com/auto`) do not work together. If you have both annotations in your deployment, then only one of them needs to be true to trigger the restart. For example, having both `reloader.stakater.com/auto: "true"` and `secret.reloader.stakater.com/auto: "false"`, or both `reloader.stakater.com/auto: "false"` and `secret.reloader.stakater.com/auto: "true"`, will restart upon a change in a secret it uses.

We can also specify a specific configmap or secret which would trigger a rolling upgrade only upon a change in that specified configmap or secret; this way, it will not trigger a rolling upgrade upon changes in all configmaps or secrets used in a `deploymentconfig`, `deployment`, `daemonset`, `statefulset` or `rollout`.
To do this, either set the auto annotation to `"false"` (`reloader.stakater.com/auto: "false"`) or remove it altogether, and use the annotations for [Configmap](.#Configmap) or [Secret](.#Secret), as in the sketch below.

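For instance, to reload `foo` only when `foo-configmap` changes, the `configmap.reloader.stakater.com/reload` annotation described in the NOTES below can be used (a minimal sketch; names are illustrative):

```yaml
kind: Deployment
metadata:
  name: foo
  annotations:
    # Reload only when this named ConfigMap changes,
    # regardless of which other resources the pods use
    configmap.reloader.stakater.com/reload: "foo-configmap"
spec:
  # ... rest of the Deployment spec
```
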
It's also possible to enable auto reloading for all resources by setting the `--auto-reload-all` flag.
In this case, all resources that do not have the auto annotation (or its typed version) set to `"false"` will be reloaded automatically when their ConfigMaps or Secrets are updated.
Notice that setting the auto annotation to an undefined value counts as false as well.

### Configmap

@@ -133,32 +156,31 @@ spec:

### NOTES

- Reloader also supports [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets). [Here](docs/Reloader-with-Sealed-Secrets.md) are the steps to use sealed-secrets with reloader.
- For [rollouts](https://github.com/argoproj/argo-rollouts/) reloader simply triggers a change is up to you how you configure the rollout strategy.
- Reloader also supports [sealed-secrets](https://github.com/bitnami-labs/sealed-secrets). [Here](docs/Reloader-with-Sealed-Secrets.md) are the steps to use sealed-secrets with Reloader.
- For [`rollouts`](https://github.com/argoproj/argo-rollouts/) Reloader simply triggers a change; it is up to you how you configure the `rollout` strategy.
- `reloader.stakater.com/auto: "true"` will only reload the pod if the configmap or secret is used (as a volume mount or as an env) in `DeploymentConfigs/Deployments/Daemonsets/Statefulsets`
- `secret.reloader.stakater.com/reload` or `configmap.reloader.stakater.com/reload` annotation will reload the pod upon changes in the specified configmap or secret, irrespective of the usage of the configmap or secret.
- you may override the auto annotation with the `--auto-annotation` flag
- you may override the secret typed auto annotation with the `--secret-auto-annotation` flag
- you may override the configmap typed auto annotation with the `--configmap-auto-annotation` flag
- you may override the search annotation with the `--auto-search-annotation` flag
  and the match annotation with the `--search-match-annotation` flag
- you may override the configmap annotation with the `--configmap-annotation` flag
- you may override the secret annotation with the `--secret-annotation` flag
- you may want to prevent watching certain namespaces with the `--namespaces-to-ignore` flag
- you may want to watch only a set of namespaces with certain labels by using the `--namespace-selector` flag
- you may want to watch only a set of secrets/configmaps with certain labels by using the `--resource-label-selector` flag
- you may want to prevent watching certain resources with the `--resources-to-ignore` flag
- you can configure logging in JSON format with the `--log-format=json` option
- you can configure the "reload strategy" with the `--reload-strategy=<strategy-name>` option (details below)

## Reload Strategies
Reloader supports multiple "reload" strategies for performing rolling upgrades to resources. The following list describes them:
- **env-vars**: When a tracked `configMap`/`secret` is updated, this strategy attaches a Reloader specific environment variable to any containers
referencing the changed `configMap` or `secret` on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.).
This strategy can be specified with the `--reload-strategy=env-vars` argument. Note: This is the default reload strategy.
- **annotations**: When a tracked `configMap`/`secret` is updated, this strategy attaches a `reloader.stakater.com/last-reloaded-from` pod template annotation
on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.). This strategy is useful when using resource syncing tools like ArgoCD, since it will not cause these tools
to detect configuration drift after a resource is reloaded. Note: Since the attached pod template annotation only tracks the last reload source, this strategy will reload any tracked resource should its
`configMap` or `secret` be deleted and recreated.
This strategy can be specified with the `--reload-strategy=annotations` argument.


Reloader supports multiple "reload" strategies for performing rolling upgrades to resources. The following list describes them (a configuration sketch follows the list):

- **env-vars**: When a tracked `configMap`/`secret` is updated, this strategy attaches a Reloader specific environment variable to any containers referencing the changed `configMap` or `secret` on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.). This strategy can be specified with the `--reload-strategy=env-vars` argument. Note: This is the default reload strategy.
- **annotations**: When a tracked `configMap`/`secret` is updated, this strategy attaches a `reloader.stakater.com/last-reloaded-from` pod template annotation on the owning resource (e.g., `Deployment`, `StatefulSet`, etc.). This strategy is useful when using resource syncing tools like ArgoCD, since it will not cause these tools to detect configuration drift after a resource is reloaded. Note: Since the attached pod template annotation only tracks the last reload source, this strategy will reload any tracked resource should its `configMap` or `secret` be deleted and recreated. This strategy can be specified with the `--reload-strategy=annotations` argument.
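
As a sketch, selecting the non-default strategy just means passing the flag to the Reloader container (field values here are illustrative, not taken from the manifests):

```yaml
# Illustrative snippet of a Reloader Deployment's container spec
spec:
  template:
    spec:
      containers:
        - name: reloader
          args:
            - "--reload-strategy=annotations"
```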

## Deploying to Kubernetes

You can deploy Reloader by the following methods:

@@ -171,18 +193,71 @@ You can apply vanilla manifests by changing `RELEASE-NAME` placeholder provided
kubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml
```

By default, Reloader gets deployed in `default` namespace and watches changes `secrets` and `configmaps` in all namespaces.
By default, Reloader gets deployed in the `default` namespace and watches changes in `secrets` and `configmaps` in all namespaces. Additionally, in the default Reloader deployment, the following resource limits and requests are set:

Reloader can be configured to ignore the resources `secrets` and `configmaps` by passing the following args (`spec.template.spec.containers.args`) to its container :
```yaml
resources:
  limits:
    cpu: 150m
    memory: 512Mi
  requests:
    cpu: 10m
    memory: 128Mi
```

| Args | Description |
| -------------------------------- | -------------------- |
Reloader can be configured to ignore the resources `secrets` and `configmaps` by passing the following arguments (`spec.template.spec.containers.args`) to its container:

| Argument | Description |
|----------------------------------|----------------------|
| --resources-to-ignore=configMaps | To ignore configMaps |
| --resources-to-ignore=secrets | To ignore secrets |

`Note`: At one time only one of these resource can be ignored, trying to do it will cause error in Reloader. Workaround for ignoring both resources is by scaling down the reloader pods to `0`.
**Note:** Only one of these resources can be ignored at a time; trying to ignore both will cause an error in Reloader. The workaround for ignoring both resources is to scale the Reloader pods down to `0`.
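
Applied to the container spec, that looks like the following minimal sketch (surrounding Deployment fields elided):

```yaml
spec:
  template:
    spec:
      containers:
        - name: reloader
          args:
            # Only one of the two resource types may be ignored at a time
            - "--resources-to-ignore=configMaps"
```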

### Vanilla kustomize
Reloader can be configured to only watch secrets/configmaps with one or more labels using the `--resource-label-selector` parameter. Supported operators are `!, in, notin, ==, =, !=`; if no operator is found, the 'exists' operator is inferred (i.e. key only). Additional examples of these selectors can be found in the [Kubernetes Docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors).

**Note:** The old `:` delimited key value mappings are deprecated and if provided will be translated to `key=value`. Likewise, if a wildcard value is provided (e.g. `key:*`) it will be translated to the standalone `key`, which checks for key existence.

These selectors can be combined, for example with:

```yaml
--resource-label-selector=reloader=enabled,key-exists,another-label in (value1,value2,value3)
```

Only configmaps or secrets labeled like the following will be watched:

```yaml
kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    reloader: enabled
    key-exists: yes
    another-label: value1
```

Reloader can be configured to only watch namespaces labeled with one or more labels using the `--namespace-selector` parameter. Supported operators are `!, in, notin, ==, =, !=`; if no operator is found, the 'exists' operator is inferred (i.e. key only). Additional examples of these selectors can be found in the [Kubernetes Docs](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors).

**Note:** The old `:` delimited key value mappings are deprecated and if provided will be translated to `key=value`. Likewise, if a wildcard value is provided (e.g. `key:*`) it will be translated to the standalone `key`, which checks for key existence.

These selectors can be combined, for example with:

```yaml
--namespace-selector=reloader=enabled,test=true
```

Only namespaces labeled as below would be watched and eligible for reloads:

```yaml
kind: Namespace
apiVersion: v1
metadata:
  labels:
    reloader: enabled
    test: true
```

### Vanilla Kustomize

You can also apply the vanilla manifests by running the following command

@@ -200,7 +275,7 @@ You can write your own `kustomization.yaml` using ours as a 'base' and write pat
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

bases:
resources:
  - https://github.com/stakater/Reloader/deployments/kubernetes

namespace: reloader
@@ -208,7 +283,9 @@ namespace: reloader

### Helm Charts

Alternatively if you have configured helm on your cluster, you can add reloader to helm from our public chart repository and deploy it via helm using below mentioned commands. Follow [this](docs/Helm2-to-Helm3.md) guide, in case you have trouble migrating reloader from Helm2 to Helm3
Alternatively, if you have configured Helm on your cluster, you can add Reloader to Helm from our public chart repository and deploy it via Helm using the commands below. Follow [this](docs/Helm2-to-Helm3.md) guide in case you have trouble migrating Reloader from Helm2 to Helm3.

#### Installation

```bash
helm repo add stakater https://stakater.github.io/stakater-charts
@@ -216,34 +293,114 @@ helm repo add stakater https://stakater.github.io/stakater-charts
helm repo update

helm install stakater/reloader # For helm3 add --generate-name flag or set the release name

helm install {{RELEASE_NAME}} stakater/reloader -n {{NAMESPACE}} --set reloader.watchGlobally=false # By default, Reloader watches in all namespaces. To watch in a single namespace, set watchGlobally=false

helm install stakater/reloader --set reloader.watchGlobally=false --namespace test --generate-name # Install Reloader in the `test` namespace, which will only watch `Deployments`, `Daemonsets`, `Statefulsets` and `Rollouts` in the `test` namespace.
```

**Note:** By default reloader watches in all namespaces. To watch in single namespace, please run following command. It will install reloader in `test` namespace which will only watch `Deployments`, `Daemonsets` `Statefulsets` and `Rollouts` in `test` namespace.
#### Uninstalling

```bash
helm install stakater/reloader --set reloader.watchGlobally=false --namespace test # For helm3 add --generate-name flag or set the release name
helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
```

Reloader can be configured to ignore the resources `secrets` and `configmaps` by using the following parameters of `values.yaml` file:
### Parameters

| Parameter | Description | Type |
| ---------------- | -------------------------------------------------------------- | ------- |
| ignoreSecrets | To ignore secrets. Valid value are either `true` or `false` | boolean |
| ignoreConfigMaps | To ignore configMaps. Valid value are either `true` or `false` | boolean |
#### Global Parameters

`Note`: At one time only one of these resource can be ignored, trying to do it will cause error in helm template compilation.
| Parameter | Description | Type | Default |
|---------------------------|------------------------------------------------------------------|-------|---------|
| `global.imagePullSecrets` | Reference to one or more secrets to be used when pulling images | array | `[]` |

You can also set the log format of Reloader to json by setting `logFormat` to `json` in values.yaml and apply the chart
#### Common Parameters

You can enable to scrape Reloader's Prometheus metrics by setting `serviceMonitor.enabled` or `podMonitor.enabled` to `true` in values.yaml file. Service monitor will be removed in future releases of reloader in favour of Pod monitor.
| Parameter | Description | Type | Default |
|--------------------|-------------------------------|--------|---------|
| `nameOverride` | Replace the name of the chart | string | `""` |
| `fullnameOverride` | Replace the generated name | string | `""` |

**Note:** Reloading of OpenShift (DeploymentConfig) and/or Argo Rollouts has to be enabled explicitly because it might not be always possible to use it on a cluster with restricted permissions. This can be done by changing the following parameters:
#### Core Reloader Parameters

| Parameter | Description | Type |
| ---------------- |------------------------------------------------------------------------------| ------- |
| isOpenshift | Enable OpenShift DeploymentConfigs. Valid value are either `true` or `false` | boolean |
| isArgoRollouts | Enable Argo Rollouts. Valid value are either `true` or `false` | boolean |
| reloadOnCreate | Enable reload on create events. Valid value are either `true` or `false` | boolean |
| Parameter | Description | Type | Default |
|-------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------|-------------|-----------|
| `reloader.autoReloadAll` | Reload all workloads whose auto annotation (or its typed version) is not set to `"false"` (equivalent to the `--auto-reload-all` flag) | boolean | `false` |
| `reloader.isArgoRollouts` | Enable Argo `Rollouts`. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.isOpenshift` | Enable OpenShift DeploymentConfigs. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.ignoreSecrets` | To ignore secrets. Valid values are either `true` or `false`. Either `ignoreSecrets` or `ignoreConfigMaps` can be ignored, not both at the same time | boolean | `false` |
| `reloader.ignoreConfigMaps` | To ignore configMaps. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.reloadOnCreate` | Enable reload on create events. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.reloadOnDelete` | Enable reload on delete events. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.syncAfterRestart` | Enable sync after Reloader restarts for **Add** events, works only when reloadOnCreate is `true`. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.reloadStrategy` | Strategy to trigger resource restart, set to either `default`, `env-vars` or `annotations` | enumeration | `default` |
| `reloader.ignoreNamespaces` | List of comma separated namespaces to ignore; if multiple are provided, they are combined with the AND operator | string | `""` |
| `reloader.namespaceSelector` | List of comma separated namespaces to select; if multiple are provided, they are combined with the AND operator | string | `""` |
| `reloader.resourceLabelSelector` | List of comma separated label selectors; if multiple are provided, they are combined with the AND operator | string | `""` |
| `reloader.logFormat` | Set type of log format. Value could be either `json` or `""` | string | `""` |
| `reloader.watchGlobally` | Allow Reloader to watch in all namespaces (`true`) or just in a single namespace (`false`) | boolean | `true` |
| `reloader.enableHA` | Enable leadership election allowing you to run multiple replicas | boolean | `false` |
| `reloader.readOnlyRootFileSystem` | Enforce readOnlyRootFilesystem | boolean | `false` |
| `reloader.legacy.rbac` | | boolean | `false` |
| `reloader.matchLabels` | Pod labels to match | map | `{}` |
| `reloader.enableMetricsByNamespace` | Expose an additional Prometheus counter of reloads by namespace (this metric may have high cardinality in clusters with many namespaces) | boolean | `false` |

#### Deployment Reloader Parameters

| Parameter | Description | Type | Default |
|--------------------------------------------------|-------------------------------------------------------------------------------------------|--------|-------------------|
| `reloader.deployment.replicas` | Number of replicas; if you wish to run multiple replicas set `reloader.enableHA = true` | int | 1 |
| `reloader.deployment.revisionHistoryLimit` | Limit the number of revisions retained in the revision history | int | 2 |
| `reloader.deployment.nodeSelector` | Schedule pods to specific nodes based on set labels | map | `{}` |
| `reloader.deployment.affinity` | Set affinity rules on pod | map | `{}` |
| `reloader.deployment.securityContext` | Set pod security context | map | `{}` |
| `reloader.deployment.containerSecurityContext` | Set container security context | map | `{}` |
| `reloader.deployment.tolerations` | A list of `tolerations` to be applied to the deployment | array | `[]` |
| `reloader.deployment.topologySpreadConstraints` | Topology spread constraints for pod assignment | array | `[]` |
| `reloader.deployment.annotations` | Set deployment annotations | map | `{}` |
| `reloader.deployment.labels` | Set deployment labels, defaults to stakater settings | array | `see values.yaml` |
| `reloader.deployment.image` | Set container image name, tag and policy | array | `see values.yaml` |
| `reloader.deployment.env` | Support for extra environment variables | array | `[]` |
| `reloader.deployment.livenessProbe` | Set liveness probe timeout values | map | `{}` |
| `reloader.deployment.readinessProbe` | Set readiness probe timeout values | map | `{}` |
| `reloader.deployment.resources` | Set container requests and limits (e.g. CPU or memory) | map | `{}` |
| `reloader.deployment.pod.annotations` | Set annotations for pod | map | `{}` |
| `reloader.deployment.priorityClassName` | Set priority class for pod in cluster | string | `""` |

#### Other Reloader Parameters

| Parameter | Description | Type | Default |
|-----------------------------------------|------------------------------------------------------------------|---------|---------|
| `reloader.service` | | map | `{}` |
| `reloader.rbac.enabled` | Specifies whether role based access control should be created | boolean | `true` |
| `reloader.serviceAccount.create` | Specifies whether a ServiceAccount should be created | boolean | `true` |
| `reloader.custom_annotations` | Add custom annotations | map | `{}` |
| `reloader.serviceMonitor.enabled` | Enable to scrape Reloader's Prometheus metrics (legacy) | boolean | `false` |
| `reloader.podMonitor.enabled` | Enable to scrape Reloader's Prometheus metrics | boolean | `false` |
| `reloader.podDisruptionBudget.enabled` | Limit the number of pods of a replicated application | boolean | `false` |
| `reloader.netpol.enabled` | Enable a NetworkPolicy for Reloader | boolean | `false` |
| `reloader.volumeMounts` | Mount volume | array | `[]` |
| `reloader.volumes` | Add volume to a pod | array | `[]` |
| `reloader.webhookUrl` | Add webhook to Reloader | string | `""` |

#### Additional Remarks

- Both `namespaceSelector` & `resourceLabelSelector` can be used together. If they are, then both conditions must be met for the configmap or secret to be eligible to trigger reload events (e.g. if a configmap matches `resourceLabelSelector` but `namespaceSelector` does not match the namespace the configmap is in, it will be ignored).
- Only one of the resources `ignoreConfigMaps` or `ignoreSecrets` can be ignored at a time; trying to ignore both will cause an error in Helm template compilation
- Reloading of OpenShift (DeploymentConfig) and/or Argo `Rollouts` has to be enabled explicitly because it might not always be possible to use it on a cluster with restricted permissions
- `isOpenshift`: Recent versions of OpenShift (tested on 4.13.3) require the specified user to be in a `uid` range which is dynamically assigned by the namespace. The solution is to unset the `runAsUser` variable via `deployment.securityContext.runAsUser=null` and let OpenShift assign it at install
- `reloadOnCreate` controls how Reloader handles secrets being added to the cache for the first time. If `reloadOnCreate` is set to true:
    1. Configmaps/secrets being added to the cache will cause Reloader to perform a rolling update of the associated workload
    1. When applications are deployed for the first time, Reloader will perform a rolling update of the associated workload
    1. If you are running Reloader in HA mode, all workloads will have a rolling update performed when a new leader is elected
- `reloadOnDelete` controls how Reloader handles secrets being deleted. If `reloadOnDelete` is set to true:
    1. Configmaps/secrets being deleted will cause Reloader to perform a rolling update of the associated workload
- `serviceMonitor` will be removed in future releases of Reloader in favour of Pod monitor
- If `reloadOnCreate` is set to false:
    1. Updates to configmaps/secrets that occur while there is no leader will not be picked up by the new leader until a subsequent update of the configmap/secret occurs
    1. In the worst case the window in which there can be no leader is 15s, as this is the LeaseDuration
- If `reloadOnDelete` is set to false:
    1. Deleting of configmaps/secrets has no effect on pods that reference these resources.
- By default, `reloadOnCreate`, `reloadOnDelete` and `syncAfterRestart` are all set to false. All need to be enabled explicitly (see the sketch below)
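
Tying these remarks together, a minimal values sketch for an HA install that also reloads on create and delete events (key names as in the parameter tables above):

```yaml
reloader:
  enableHA: true          # leader election; permits more than one replica
  reloadOnCreate: true    # roll workloads when configmaps/secrets first appear
  reloadOnDelete: true    # roll workloads when configmaps/secrets are deleted
  deployment:
    replicas: 2
```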

## Help

@@ -253,7 +410,7 @@ You can find more documentation [here](docs)

### Have a question?

File a GitHub [issue](https://github.com/stakater/Reloader/issues), or send us an [email](mailto:stakater@gmail.com).
File a GitHub [issue](https://github.com/stakater/Reloader/issues).

### Talk to us on Slack

@@ -271,19 +428,27 @@ Please use the [issue tracker](https://github.com/stakater/Reloader/issues) to r
### Developing

1. Deploy Reloader.
2. Run `okteto up` to activate your development container.
3. `make build`.
4. `./Reloader`
1. Run `okteto up` to activate your development container.
1. `make build`
1. `./Reloader`

PRs are welcome. In general, we follow the "fork-and-pull" Git workflow.

1. **Fork** the repo on GitHub
2. **Clone** the project to your own machine
3. **Commit** changes to your own branch
4. **Push** your work back up to your fork
5. Submit a **Pull request** so that we can review your changes
1. **Fork** the repo on GitHub
1. **Clone** the project to your own machine
1. **Commit** changes to your own branch
1. **Push** your work back up to your fork
1. Submit a **Pull request** so that we can review your changes

NOTE: Be sure to merge the latest from "upstream" before making a pull request!
**NOTE:** Be sure to merge the latest from "upstream" before making a pull request!

## Release Processes

_Repository GitHub releases_: As requested by the community in [issue 685](https://github.com/stakater/Reloader/issues/685), Reloader is now based on a manual release process. Releases are no longer done on every merged PR to the main branch, but manually on request. When a GitHub release is made, the corresponding image is built and pushed to the registry.

_Repository git tagging_: The Reloader repository is tagged on every push to main. The creation of a tag does not trigger anything else; it just acts as a pointer to a commit on main.

_Helm chart versioning_: The Reloader Helm chart release process is still [work in progress](https://github.com/stakater/Reloader/issues/684). This page will be updated when the process is settled. As a heads-up, to address the issues that are inherent in the current process, the chart will most probably be relocated to the [Stakater charts repository](https://github.com/stakater/charts/). This setup is common in open-source repositories. When a GitHub release has been manually created in this repository, an image will be built, and Renovate in the charts repository will update the Helm chart to use it.

## Changelog

@@ -291,18 +456,17 @@ View our closed [Pull Requests](https://github.com/stakater/Reloader/pulls?q=is%

## License

Apache2 © [Stakater](http://stakater.com)
Apache2 © [Stakater][website]

## About

`Reloader` is maintained by [Stakater][website]. Like it? Please let us know at <hello@stakater.com>

See [our other projects][community]
See [our other projects](https://github.com/stakater)
or contact us for professional services and queries at <hello@stakater.com>

[website]: http://stakater.com/
[community]: https://github.com/stakater/
[website]: https://stakater.com

## Acknowledgements

- [ConfigmapController](https://github.com/fabric8io/configmapcontroller); We documented here why we re-created [Reloader](docs/Reloader-vs-ConfigmapController.md)
- [ConfigmapController](https://github.com/fabric8io/configmapcontroller); We documented [here](docs/Reloader-vs-ConfigmapController.md) why we re-created Reloader

@@ -3,8 +3,8 @@
apiVersion: v1
name: reloader
description: Reloader chart that runs on kubernetes
version: v0.0.117
appVersion: v0.0.117
version: 1.1.1
appVersion: v1.1.0
keywords:
  - Reloader
  - kubernetes
@@ -16,16 +16,6 @@ maintainers:
  - name: Stakater
    email: hello@stakater.com
  - name: rasheedamir
    email: rasheed@aurorasolutions.io
  - name: waseem-h
    email: waseemhassan@stakater.com
    email: rasheed@stakater.com
  - name: faizanahmad055
    email: faizan.ahmad55@outlook.com
  - name: kahootali
    email: ali.kahoot@aurorasolutions.io
  - name: ahmadiq
    email: ahmad@aurorasolutions.io
  - name: ahsan-storm
    email: ahsanmuhammad1@outlook.com
  - name: ahmedwaleedmalik
    email: waleed@stakater.com

@@ -22,12 +22,29 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this

{{- define "reloader-labels.chart" -}}
app: {{ template "reloader-fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
release: {{ .Release.Name | quote }}
heritage: {{ .Release.Service | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
{{- end -}}

{{/*
Create pod anti affinity labels
*/}}
{{- define "reloader-podAntiAffinity" -}}
podAntiAffinity:
  preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 100
      podAffinityTerm:
        labelSelector:
          matchExpressions:
            - key: app
              operator: In
              values:
                - {{ template "reloader-fullname" . }}
        topologyKey: "kubernetes.io/hostname"
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
@@ -45,4 +62,4 @@ Create the annotations to support helm3
{{- define "reloader-helm3.annotations" -}}
meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
meta.helm.sh/release-name: {{ .Release.Name | quote }}
{{- end -}}
{{- end -}}

@@ -17,7 +17,6 @@ metadata:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
  name: {{ template "reloader-fullname" . }}-role
  namespace: {{ .Release.Namespace }}
rules:
  - apiGroups:
      - ""
@@ -32,6 +31,16 @@ rules:
      - list
      - get
      - watch
{{- if .Values.reloader.namespaceSelector }}
  - apiGroups:
      - ""
    resources:
      - namespaces
    verbs:
      - get
      - list
      - watch
{{- end }}
{{- if and (.Capabilities.APIVersions.Has "apps.openshift.io/v1") (.Values.reloader.isOpenshift) }}
  - apiGroups:
      - "apps.openshift.io"
@@ -68,13 +77,33 @@ rules:
      - update
      - patch
  - apiGroups:
      - "extensions"
      - "batch"
    resources:
      - deployments
      - daemonsets
      - cronjobs
    verbs:
      - list
      - get
  - apiGroups:
      - "batch"
    resources:
      - jobs
    verbs:
      - create
{{- if .Values.reloader.enableHA }}
  - apiGroups:
      - "coordination.k8s.io"
    resources:
      - leases
    verbs:
      - create
      - get
      - update
{{- end}}
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
{{- end }}

@@ -17,7 +17,6 @@ metadata:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
  name: {{ template "reloader-fullname" . }}-role-binding
  namespace: {{ .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
@@ -25,5 +24,5 @@ roleRef:
subjects:
  - kind: ServiceAccount
    name: {{ template "reloader-serviceAccountName" . }}
    namespace: {{ .Release.Namespace }}
    namespace: {{ .Values.namespace | default .Release.Namespace }}
{{- end }}

@@ -15,10 +15,14 @@ metadata:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
  name: {{ template "reloader-fullname" . }}
  namespace: {{ .Release.Namespace }}
  namespace: {{ .Values.namespace | default .Release.Namespace }}
spec:
{{- if not (.Values.reloader.enableHA) }}
  replicas: 1
{{- else }}
  replicas: {{ .Values.reloader.deployment.replicas }}
  revisionHistoryLimit: 2
{{- end}}
  revisionHistoryLimit: {{ .Values.reloader.deployment.revisionHistoryLimit }}
  selector:
    matchLabels:
      app: {{ template "reloader-fullname" . }}
@@ -41,27 +45,60 @@ spec:
{{ toYaml .Values.reloader.matchLabels | indent 8 }}
{{- end }}
    spec:
{{- with .Values.reloader.deployment.imagePullSecrets }}
      imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.nodeSelector }}
      nodeSelector:
{{ toYaml .Values.reloader.deployment.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.affinity }}
{{- if or (.Values.reloader.deployment.affinity) (.Values.reloader.enableHA) }}
      affinity:
{{- if .Values.reloader.deployment.affinity }}
{{ toYaml .Values.reloader.deployment.affinity | indent 8 }}
{{- else }}
{{ include "reloader-podAntiAffinity" . | indent 8 }}
{{- end }}
{{- end }}
{{- if .Values.reloader.deployment.tolerations }}
      tolerations:
{{ toYaml .Values.reloader.deployment.tolerations | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.topologySpreadConstraints }}
      topologySpreadConstraints:
{{ toYaml .Values.reloader.deployment.topologySpreadConstraints | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.priorityClassName }}
      priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
{{- end }}
      containers:
{{- if $.Values.global.imageRegistry }}
        - image: "{{ $.Values.global.imageRegistry }}/{{ .Values.reloader.deployment.image.base }}:{{ .Values.reloader.deployment.image.tag }}"
{{- else }}
        - image: "{{ .Values.reloader.deployment.image.name }}:{{ .Values.reloader.deployment.image.tag }}"
{{- end }}
          imagePullPolicy: {{ .Values.reloader.deployment.image.pullPolicy }}
          name: {{ template "reloader-fullname" . }}
{{- if or (.Values.reloader.deployment.env.open) (.Values.reloader.deployment.env.secret) (.Values.reloader.deployment.env.field) (eq .Values.reloader.watchGlobally false) }}
          env:
            - name: GOMAXPROCS
{{- if .Values.reloader.deployment.gomaxprocsOverride }}
              value: {{ .Values.reloader.deployment.gomaxprocsOverride | quote }}
{{- else }}
              valueFrom:
                resourceFieldRef:
                  resource: limits.cpu
                  divisor: '1'
{{- end }}
            - name: GOMEMLIMIT
{{- if .Values.reloader.deployment.gomemlimitOverride }}
              value: {{ .Values.reloader.deployment.gomemlimitOverride | quote }}
{{- else }}
              valueFrom:
                resourceFieldRef:
                  resource: limits.memory
                  divisor: '1'
{{- end }}
{{- range $name, $value := .Values.reloader.deployment.env.open }}
{{- if not (empty $value) }}
            - name: {{ $name | quote }}
@@ -78,6 +115,17 @@ spec:
                  key: {{ $name | quote }}
{{- end }}
{{- end }}
{{- range $secret, $values := .Values.reloader.deployment.env.existing }}
{{- range $name, $key := $values }}
{{- if not ( empty $name) }}
            - name: {{ $name | quote }}
              valueFrom:
                secretKeyRef:
                  name: {{ $secret | quote }}
                  key: {{ $key | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- range $name, $value := .Values.reloader.deployment.env.field }}
{{- if not ( empty $value) }}
            - name: {{ $name | quote }}
@@ -92,19 +140,32 @@ spec:
                fieldRef:
                  fieldPath: metadata.namespace
{{- end }}
{{- if .Values.reloader.enableHA }}
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
{{- end }}
{{- if .Values.reloader.enableMetricsByNamespace }}
            - name: METRICS_COUNT_BY_NAMESPACE
              value: enabled
{{- end }}

          ports:
            - name: http
              containerPort: 9090
          livenessProbe:
            httpGet:
              path: /metrics
              path: /live
              port: http
            timeoutSeconds: {{ .Values.reloader.deployment.livenessProbe.timeoutSeconds | default "5" }}
            failureThreshold: {{ .Values.reloader.deployment.livenessProbe.failureThreshold | default "5" }}
            periodSeconds: {{ .Values.reloader.deployment.livenessProbe.periodSeconds | default "10" }}
            successThreshold: {{ .Values.reloader.deployment.livenessProbe.successThreshold | default "1" }}
            initialDelaySeconds: {{ .Values.reloader.deployment.livenessProbe.initialDelaySeconds | default "10" }}
          readinessProbe:
            httpGet:
              path: /metrics
@@ -113,21 +174,34 @@ spec:
            failureThreshold: {{ .Values.reloader.deployment.readinessProbe.failureThreshold | default "5" }}
            periodSeconds: {{ .Values.reloader.deployment.readinessProbe.periodSeconds | default "10" }}
            successThreshold: {{ .Values.reloader.deployment.readinessProbe.successThreshold | default "1" }}
            initialDelaySeconds: {{ .Values.reloader.deployment.readinessProbe.initialDelaySeconds | default "10" }}

{{- with .Values.reloader.deployment.containerSecurityContext }}
          securityContext: {{ toYaml . | nindent 10 }}
{{- end }}
{{- $containerSecurityContext := .Values.reloader.deployment.containerSecurityContext | default dict }}
{{- if .Values.reloader.readOnlyRootFileSystem }}
{{- $_ := set $containerSecurityContext "readOnlyRootFilesystem" true }}
{{- end }}

{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
          securityContext:
{{- toYaml $containerSecurityContext | nindent 10 }}

{{- if (or (.Values.reloader.deployment.volumeMounts) (eq .Values.reloader.readOnlyRootFileSystem true)) }}
          volumeMounts:
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
            - mountPath: /tmp/
              name: tmp-volume
{{- end }}
{{- with .Values.reloader.deployment.volumeMounts }}
{{- . | toYaml | nindent 10 }}
{{- end }}
{{- end }}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (ne .Values.reloader.reloadStrategy "default")}}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.logLevel) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.namespaceSelector) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll)}}
          args:
{{- if .Values.reloader.logFormat }}
            - "--log-format={{ .Values.reloader.logFormat }}"
{{- end }}
{{- if .Values.reloader.logLevel }}
            - "--log-level={{ .Values.reloader.logLevel }}"
{{- end }}
{{- if .Values.reloader.ignoreSecrets }}
            - "--resources-to-ignore=secrets"
{{- end }}
@@ -137,7 +211,12 @@ spec:
{{- if .Values.reloader.ignoreNamespaces }}
            - "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
{{- end }}

{{- if .Values.reloader.namespaceSelector }}
            - "--namespace-selector={{ .Values.reloader.namespaceSelector }}"
{{- end }}
{{- if .Values.reloader.resourceLabelSelector }}
            - "--resource-label-selector={{ .Values.reloader.resourceLabelSelector }}"
{{- end }}
{{- if .Values.reloader.custom_annotations }}
{{- if .Values.reloader.custom_annotations.configmap }}
            - "--configmap-annotation"
@@ -150,6 +229,14 @@ spec:
{{- if .Values.reloader.custom_annotations.auto }}
            - "--auto-annotation"
            - "{{ .Values.reloader.custom_annotations.auto }}"
{{- end }}
{{- if .Values.reloader.custom_annotations.secret_auto }}
            - "--secret-auto-annotation"
            - "{{ .Values.reloader.custom_annotations.secret_auto }}"
{{- end }}
{{- if .Values.reloader.custom_annotations.configmap_auto }}
            - "--configmap-auto-annotation"
            - "{{ .Values.reloader.custom_annotations.configmap_auto }}"
{{- end }}
{{- if .Values.reloader.custom_annotations.search }}
            - "--auto-search-annotation"
@@ -159,6 +246,10 @@ spec:
            - "--search-match-annotation"
            - "{{ .Values.reloader.custom_annotations.match }}"
{{- end }}
{{- if .Values.reloader.webhookUrl }}
            - "--webhook-url"
            - "{{ .Values.reloader.webhookUrl }}"
{{- end }}
{{- end }}
{{- if eq .Values.reloader.isArgoRollouts true }}
            - "--is-Argo-Rollouts={{ .Values.reloader.isArgoRollouts }}"
@@ -166,9 +257,21 @@ spec:
{{- if eq .Values.reloader.reloadOnCreate true }}
            - "--reload-on-create={{ .Values.reloader.reloadOnCreate }}"
{{- end }}
{{- if eq .Values.reloader.reloadOnDelete true }}
            - "--reload-on-delete={{ .Values.reloader.reloadOnDelete }}"
{{- end }}
{{- if eq .Values.reloader.syncAfterRestart true }}
            - "--sync-after-restart={{ .Values.reloader.syncAfterRestart }}"
{{- end }}
{{- if ne .Values.reloader.reloadStrategy "default" }}
            - "--reload-strategy={{ .Values.reloader.reloadStrategy }}"
{{- end }}
{{- if or (gt (int .Values.reloader.deployment.replicas) 1) (.Values.reloader.enableHA) }}
            - "--enable-ha=true"
{{- end}}
{{- if eq .Values.reloader.autoReloadAll true }}
            - "--auto-reload-all=true"
{{- end -}}
{{- end }}
{{- if .Values.reloader.deployment.resources }}
          resources:
@@ -181,8 +284,13 @@ spec:
{{- if hasKey .Values.reloader.deployment "automountServiceAccountToken" }}
      automountServiceAccountToken: {{ .Values.reloader.deployment.automountServiceAccountToken }}
{{- end }}
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
{{- if (or (.Values.reloader.deployment.volumes) (eq .Values.reloader.readOnlyRootFileSystem true)) }}
      volumes:
{{- if eq .Values.reloader.readOnlyRootFileSystem true }}
        - emptyDir: {}
          name: tmp-volume
{{- end }}
{{- with .Values.reloader.deployment.volumes }}
{{- . | toYaml | nindent 8 }}
{{- end }}
{{- end }}

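A note on the GOMAXPROCS/GOMEMLIMIT wiring above: by default both are derived from the container's CPU and memory limits via `resourceFieldRef`; a chart user can pin them instead (a sketch using the override keys read by this template):

```yaml
reloader:
  deployment:
    gomaxprocsOverride: "2"       # sets GOMAXPROCS directly
    gomemlimitOverride: "256MiB"  # sets GOMEMLIMIT directly
```
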
@@ -0,0 +1,39 @@
|
||||
{{- if and ( .Values.reloader.netpol.enabled ) }}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
annotations:
|
||||
{{ include "reloader-helm3.annotations" . | indent 4 }}
|
||||
labels:
|
||||
{{ include "reloader-labels.chart" . | indent 4 }}
|
||||
{{- if .Values.reloader.matchLabels }}
|
||||
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
|
||||
{{- end }}
|
||||
name: {{ template "reloader-fullname" . }}
|
||||
namespace: {{ .Values.namespace | default .Release.Namespace }}
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app: {{ template "reloader-fullname" . }}
|
||||
release: {{ .Release.Name | quote }}
|
||||
{{- if .Values.reloader.matchLabels }}
|
||||
{{ toYaml .Values.reloader.matchLabels | indent 6 }}
|
||||
{{- end }}
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
ingress:
|
||||
- ports:
|
||||
- port: http
|
||||
{{- with .Values.reloader.netpol.from}}
|
||||
from:
|
||||
{{- toYaml .| nindent 8 }}
|
||||
{{- end }}
|
||||
egress:
|
||||
- ports:
|
||||
- port: 443
|
||||
{{- with .Values.reloader.netpol.to}}
|
||||
to:
|
||||
{{- toYaml .| nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
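Assuming the `values.yaml` shape declared later in this diff (`reloader.netpol`), a sketch of enabling the policy so that only Prometheus may reach the `http` port; the Prometheus pod selector is an assumption borrowed from the commented example in `values.yaml`:

```yaml
reloader:
  netpol:
    enabled: true
    from:
      - podSelector:
          matchLabels:
            app.kubernetes.io/name: prometheus   # assumed Prometheus pod label
    to: []   # egress stays limited to the template's port 443 rule
```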
@@ -0,0 +1,12 @@
{{- if .Values.reloader.podDisruptionBudget.enabled }}
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: {{ template "reloader-fullname" . }}
  namespace: {{ .Values.namespace | default .Release.Namespace }}
spec:
  minAvailable: {{ .Values.reloader.podDisruptionBudget.minAvailable }}
  selector:
    matchLabels:
      app: {{ template "reloader-fullname" . }}
{{- end }}
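A budget only makes sense with more than one replica, which in this chart implies HA mode; a hedged example pairing the two (values illustrative):

```yaml
reloader:
  enableHA: true          # leader election, allows replicas > 1
  deployment:
    replicas: 2
  podDisruptionBudget:
    enabled: true
    minAvailable: 1       # keep one instance during voluntary disruptions
```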
@@ -1,26 +1,55 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.reloader.podMonitor.enabled ) }}
{{- if ( .Values.reloader.podMonitor.enabled ) }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
{{- if .Values.reloader.podMonitor.annotations }}
annotations:
{{ tpl (toYaml .Values.reloader.podMonitor.annotations) . | indent 4 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.podMonitor.labels }}
{{ toYaml .Values.reloader.podMonitor.labels | indent 4}}
{{ tpl (toYaml .Values.reloader.podMonitor.labels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
{{- if .Values.reloader.podMonitor.namespace }}
namespace: {{ .Values.reloader.podMonitor.namespace }}
namespace: {{ tpl .Values.reloader.podMonitor.namespace . }}
{{- else }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
{{- end }}
spec:
podMetricsEndpoints:
- port: http
path: "/metrics"
{{- if .Values.reloader.podMonitor.interval }}
interval: {{ .Values.reloader.podMonitor.interval }}
{{- end }}
{{- if .Values.reloader.podMonitor.timeout }}
scrapeTimeout: {{ .Values.reloader.podMonitor.timeout }}
{{- end }}
{{- with .Values.reloader.podMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.reloader.podMonitor.scheme }}
scheme: {{ . }}
{{- end }}
{{- with .Values.reloader.podMonitor.bearerTokenSecret }}
bearerTokenSecret: {{ . }}
{{- end }}
{{- with .Values.reloader.podMonitor.tlsConfig }}
tlsConfig:
{{- toYaml .| nindent 6 }}
{{- end }}
{{- with .Values.reloader.podMonitor.timeout }}
scrapeTimeout: {{ . }}
{{- end }}
honorLabels: {{ .Values.reloader.podMonitor.honorLabels }}
{{- with .Values.reloader.podMonitor.metricRelabelings }}
metricRelabelings:
{{- tpl (toYaml . | nindent 6) $ }}
{{- end }}
{{- with .Values.reloader.podMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.reloader.podMonitor.podTargetLabels }}
podTargetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
jobLabel: {{ template "reloader-fullname" . }}
namespaceSelector:
matchNames:
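A sketch of values exercising this template. The `release: kube-prometheus-stack` label is an assumed Prometheus Operator selector, and note that the diff above routes `namespace` and `labels` through `tpl`, so they may contain template expressions:

```yaml
reloader:
  podMonitor:
    enabled: true
    namespace: monitoring            # may be templated, e.g. "{{ .Release.Namespace }}"
    interval: 30s
    timeout: 10s
    labels:
      release: kube-prometheus-stack   # assumed label your Prometheus selects on
```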
@@ -17,7 +17,7 @@ metadata:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-role
namespace: {{ .Release.Namespace }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
rules:
- apiGroups:
- ""
@@ -77,4 +77,34 @@ rules:
- get
- update
- patch
- apiGroups:
- "batch"
resources:
- cronjobs
verbs:
- list
- get
- apiGroups:
- "batch"
resources:
- jobs
verbs:
- create
{{- if .Values.reloader.enableHA }}
- apiGroups:
- "coordination.k8s.io"
resources:
- leases
verbs:
- create
- get
- update
{{- end}}
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
{{- end }}
@@ -17,7 +17,7 @@ metadata:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-role-binding
namespace: {{ .Release.Namespace }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
@@ -25,5 +25,5 @@ roleRef:
subjects:
- kind: ServiceAccount
name: {{ template "reloader-serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
{{- end }}
deployments/kubernetes/chart/reloader/templates/secret.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
{{- if .Values.reloader.deployment.env.secret -}}
apiVersion: v1
kind: Secret
metadata:
  name: {{ template "reloader-fullname" . }}
  namespace: {{ .Values.namespace | default .Release.Namespace }}
type: Opaque
data:
{{ if .Values.reloader.deployment.env.secret.ALERT_ON_RELOAD -}}
  ALERT_ON_RELOAD: {{ .Values.reloader.deployment.env.secret.ALERT_ON_RELOAD | b64enc | quote }}
{{ end }}
{{- if .Values.reloader.deployment.env.secret.ALERT_SINK -}}
  ALERT_SINK: {{ .Values.reloader.deployment.env.secret.ALERT_SINK | b64enc | quote }}
{{ end }}
{{- if .Values.reloader.deployment.env.secret.ALERT_WEBHOOK_URL -}}
  ALERT_WEBHOOK_URL: {{ .Values.reloader.deployment.env.secret.ALERT_WEBHOOK_URL | b64enc | quote }}
{{ end }}
{{- if .Values.reloader.deployment.env.secret.ALERT_ADDITIONAL_INFO -}}
  ALERT_ADDITIONAL_INFO: {{ .Values.reloader.deployment.env.secret.ALERT_ADDITIONAL_INFO | b64enc | quote }}
{{ end }}
{{ end }}
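A hedged example of feeding this template. The four keys are exactly the ones the template reads; the webhook URL and cluster name are placeholders:

```yaml
reloader:
  deployment:
    env:
      secret:
        ALERT_ON_RELOAD: "true"
        ALERT_SINK: "slack"
        ALERT_WEBHOOK_URL: "https://hooks.slack.com/services/T000/B000/XXXX"  # placeholder
        ALERT_ADDITIONAL_INFO: "cluster: prod-eu-1"                           # placeholder
```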
@@ -13,7 +13,7 @@ metadata:
{{ toYaml .Values.reloader.service.labels | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
spec:
selector:
{{- if .Values.reloader.deployment.labels }}
@@ -22,5 +22,5 @@ metadata:
{{ toYaml .Values.reloader.matchLabels | indent 4 }}
{{- end }}
name: {{ template "reloader-serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
{{- end }}
@@ -2,25 +2,54 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
{{- if .Values.reloader.serviceMonitor.annotations }}
annotations:
{{ tpl (toYaml .Values.reloader.serviceMonitor.annotations) . | indent 4 }}
{{- end }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.serviceMonitor.labels }}
{{ toYaml .Values.reloader.serviceMonitor.labels | indent 4}}
{{ tpl (toYaml .Values.reloader.serviceMonitor.labels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}
{{- if .Values.reloader.serviceMonitor.namespace }}
namespace: {{ .Values.reloader.serviceMonitor.namespace }}
namespace: {{ tpl .Values.reloader.serviceMonitor.namespace . }}
{{- else }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
{{- end }}
spec:
endpoints:
- targetPort: http
path: "/metrics"
{{- if .Values.reloader.serviceMonitor.interval }}
interval: {{ .Values.reloader.serviceMonitor.interval }}
{{- end }}
{{- if .Values.reloader.serviceMonitor.timeout }}
scrapeTimeout: {{ .Values.reloader.serviceMonitor.timeout }}
{{- end }}
{{- with .Values.reloader.serviceMonitor.interval }}
interval: {{ . }}
{{- end }}
{{- with .Values.reloader.serviceMonitor.scheme }}
scheme: {{ . }}
{{- end }}
{{- with .Values.reloader.serviceMonitor.bearerTokenFile }}
bearerTokenFile: {{ . }}
{{- end }}
{{- with .Values.reloader.serviceMonitor.tlsConfig }}
tlsConfig:
{{- toYaml .| nindent 6 }}
{{- end }}
{{- with .Values.reloader.serviceMonitor.timeout }}
scrapeTimeout: {{ . }}
{{- end }}
honorLabels: {{ .Values.reloader.serviceMonitor.honorLabels }}
{{- with .Values.reloader.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{- tpl (toYaml . | nindent 6) $ }}
{{- end }}
{{- with .Values.reloader.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 6 }}
{{- end }}
{{- with .Values.reloader.serviceMonitor.targetLabels }}
targetLabels:
{{- toYaml . | nindent 4 }}
{{- end }}
jobLabel: {{ template "reloader-fullname" . }}
namespaceSelector:
matchNames:
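Per the chart's own comments, `scheme` and `tlsConfig` exist mainly for scraping through Istio mTLS; a sketch with one illustrative `tlsConfig` field from the Prometheus Operator API:

```yaml
reloader:
  serviceMonitor:
    enabled: true
    scheme: https
    tlsConfig:
      insecureSkipVerify: true   # illustrative; any Prometheus Operator TLSConfig field fits here
```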
@@ -0,0 +1,40 @@
{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") (.Values.reloader.verticalPodAutoscaler.enabled) }}
apiVersion: autoscaling.k8s.io/v1
kind: VerticalPodAutoscaler
metadata:
  name: {{ template "reloader-fullname" . }}
  namespace: {{ .Values.namespace | default .Release.Namespace }}
  labels:
    {{- include "reloader-labels.chart" . | nindent 4 }}
spec:
  {{- with .Values.reloader.verticalPodAutoscaler.recommenders }}
  recommenders:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  resourcePolicy:
    containerPolicies:
      - containerName: {{ template "reloader-fullname" . }}
        {{- with .Values.reloader.verticalPodAutoscaler.controlledResources }}
        controlledResources:
          {{- toYaml . | nindent 8 }}
        {{- end }}
        {{- if .Values.reloader.verticalPodAutoscaler.controlledValues }}
        controlledValues: {{ .Values.reloader.verticalPodAutoscaler.controlledValues }}
        {{- end }}
        {{- if .Values.reloader.verticalPodAutoscaler.maxAllowed }}
        maxAllowed:
          {{ toYaml .Values.reloader.verticalPodAutoscaler.maxAllowed | nindent 8 }}
        {{- end }}
        {{- if .Values.reloader.verticalPodAutoscaler.minAllowed }}
        minAllowed:
          {{ toYaml .Values.reloader.verticalPodAutoscaler.minAllowed | nindent 8 }}
        {{- end }}
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ template "reloader-fullname" . }}
  {{- with .Values.reloader.verticalPodAutoscaler.updatePolicy }}
  updatePolicy:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}
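A sketch of the corresponding values (keys as declared in the `values.yaml` below; the bounds are illustrative):

```yaml
reloader:
  verticalPodAutoscaler:
    enabled: true
    controlledValues: RequestsOnly
    minAllowed:
      cpu: 10m
      memory: 128Mi
    maxAllowed:
      cpu: "1"
      memory: 512Mi
    updatePolicy:
      updateMode: Auto
```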
@@ -0,0 +1,63 @@
suite: Deployment

templates:
  - deployment.yaml

tests:
  - it: sets readOnlyRootFilesystem in container securityContext when reloader.readOnlyRootFileSystem is true
    set:
      reloader:
        readOnlyRootFileSystem: true
        deployment:
          containerSecurityContext:
            readOnlyRootFilesystem: false
    asserts:
      - equal:
          path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
          value: true

  - it: sets readOnlyRootFilesystem in container securityContext even if reloader.deployment.containerSecurityContext is null
    set:
      reloader:
        readOnlyRootFileSystem: true
        deployment:
          containerSecurityContext: null
    asserts:
      - equal:
          path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
          value: true

  - it: does not override readOnlyRootFilesystem in container securityContext based on reloader.readOnlyRootFileSystem
    set:
      reloader:
        readOnlyRootFileSystem: false
        deployment:
          containerSecurityContext:
            readOnlyRootFilesystem: true
    asserts:
      - equal:
          path: spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem
          value: true

  - it: template is still valid with no defined containerSecurityContext
    set:
      reloader:
        readOnlyRootFileSystem: false
        deployment:
          containerSecurityContext: null
    asserts:
      - isEmpty:
          path: spec.template.spec.containers[0].securityContext

  - it: template still sets POD_NAME and POD_NAMESPACE environment variables when enableHA is true
    set:
      reloader:
        enableHA: true
    asserts:
      - contains:
          path: spec.template.spec.containers[0].env
          content:
            name: POD_NAME
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
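The behavior these tests pin down is that `reloader.readOnlyRootFileSystem: true` forces `readOnlyRootFilesystem: true` into the container security context even when `containerSecurityContext` is null or disagrees. So a values file like the following sketch still ends up read-only; the dropped capabilities are illustrative:

```yaml
reloader:
  readOnlyRootFileSystem: true
  deployment:
    containerSecurityContext:
      capabilities:
        drop: ["ALL"]   # illustrative; readOnlyRootFilesystem is injected regardless
```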
@@ -3,28 +3,46 @@ global:
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imageRegistry: ""
imagePullSecrets: []

kubernetes:
host: https://kubernetes.default

nameOverride: ""
fullnameOverride: ""

reloader:
autoReloadAll: false
isArgoRollouts: false
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
reloadOnCreate: false
reloadOnDelete: false
syncAfterRestart: false
reloadStrategy: default # Set to default, env-vars or annotations
ignoreNamespaces: "" # Comma separated list of namespaces to ignore
logFormat: "" #json
namespaceSelector: "" # Comma separated list of k8s label selectors for namespaces selection
resourceLabelSelector: "" # Comma separated list of k8s label selectors for configmap/secret selection
logFormat: "" # json
logLevel: info # Log level to use (trace, debug, info, warning, error, fatal and panic)
watchGlobally: true
# Set to true to enable leadership election allowing you to run multiple replicas
enableHA: false
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
readOnlyRootFileSystem: false
legacy:
rbac: false
matchLabels: {}
# Set to true to expose a prometheus counter of reloads by namespace (this metric may have high cardinality in clusters with many namespaces)
enableMetricsByNamespace: false
deployment:
# If you wish to run multiple replicas set reloader.enableHA = true
replicas: 1

revisionHistoryLimit: 2

nodeSelector:
# cloud.google.com/gke-nodepool: default-pool

@@ -42,6 +60,8 @@ reloader:
securityContext:
runAsNonRoot: true
runAsUser: 65534
seccompProfile:
type: RuntimeDefault

containerSecurityContext: {}
# capabilities:
@@ -58,14 +78,27 @@ reloader:
# effect: "NoSchedule"
tolerations: []

# Topology spread constraints for pod assignment
# Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
# Example:
# topologySpreadConstraints:
# - maxSkew: 1
# topologyKey: zone
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# app: my-app
topologySpreadConstraints: []

annotations: {}
labels:
provider: stakater
group: com.stakater.platform
version: v0.0.117
version: v1.1.0
image:
name: stakater/reloader
tag: v0.0.117
name: ghcr.io/stakater/reloader
base: stakater/reloader
tag: v1.1.0
pullPolicy: IfNotPresent
# Support for extra environment variables.
env:
@@ -73,8 +106,21 @@ reloader:
open:
# secret supports Key value pair as environment variables. It gets the values based on keys from default reloader secret if any.
secret:
# ALERT_ON_RELOAD: <"true"|"false">
# ALERT_SINK: <"slack"> # By default it will be a raw text based webhook
# ALERT_WEBHOOK_URL: <"webhook_url">
# ALERT_ADDITIONAL_INFO: <"Additional Info like Cluster Name if needed">
# field supports Key value pair as environment variables. It gets the values from other fields of pod.
field:
# existing secret, you can specify multiple existing secrets, for each
# specify the env var name followed by the key in existing secret that
# will be used to populate the env var
existing:
# existing_secret_name:
# ALERT_ON_RELOAD: alert_on_reload_key
# ALERT_SINK: alert_sink_key
# ALERT_WEBHOOK_URL: alert_webhook_key
# ALERT_ADDITIONAL_INFO: alert_additional_info_key

# Liveness and readiness probe timeout values.
livenessProbe: {}
@@ -101,8 +147,16 @@ reloader:
pod:
annotations: {}
priorityClassName: ""
# imagePullSecrets:
# - name: myregistrykey

# Put "0" in either to have go runtime ignore the set value.
# Otherwise, see https://pkg.go.dev/runtime#hdr-Environment_Variables for GOMAXPROCS and GOMEMLIMIT
gomaxprocsOverride: ""
gomemlimitOverride: ""

service: {}

# labels: {}
# annotations: {}
# port: 9090
@@ -118,7 +172,7 @@ reloader:
annotations: {}
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
name:
# Optional flags to pass to the Reloader entrypoint
# Example:
# custom_annotations:
@@ -132,20 +186,148 @@ reloader:
enabled: false
# Set the namespace the ServiceMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s

# Fallback to the prometheus default unless specified
# interval: 10s

## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
# scheme: ""

## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
# tlsConfig: {}

# bearerTokenFile:
# Fallback to the prometheus default unless specified
# timeout: 30s

## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
labels: {}

## Used to pass annotations that are used by the Prometheus installed in your cluster to select Service Monitors to work with
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
annotations: {}

# Retain the job and instance labels of the metrics pushed to the Pushgateway
# [Scraping Pushgateway](https://github.com/prometheus/pushgateway#configure-the-pushgateway-as-a-target-to-scrape)
honorLabels: true

## Metric relabel configs to apply to samples before ingestion.
## [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs)
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]

## Relabel configs to apply to samples before ingestion.
## [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace

targetLabels: []

podMonitor:
enabled: false
# Set the namespace the podMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set labels for the podMonitor, use this to define your scrape label for Prometheus Operator
# labels:
# Set timeout for scrape
# timeout: 10s

# Fallback to the prometheus default unless specified
# interval: 10s

## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
# scheme: ""

## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
# tlsConfig: {}

# bearerTokenSecret:
# Fallback to the prometheus default unless specified
# timeout: 30s

## Used to pass Labels that are used by the Prometheus installed in your cluster to select Service Monitors to work with
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
labels: {}

## Used to pass annotations that are used by the Prometheus installed in your cluster to select Service Monitors to work with
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
annotations: {}

# Retain the job and instance labels of the metrics pushed to the Pushgateway
# [Scraping Pushgateway](https://github.com/prometheus/pushgateway#configure-the-pushgateway-as-a-target-to-scrape)
honorLabels: true

## Metric relabel configs to apply to samples before ingestion.
## [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs)
metricRelabelings: []
# - action: keep
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
# sourceLabels: [__name__]

## Relabel configs to apply to samples before ingestion.
## [Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)
relabelings: []
# - sourceLabels: [__meta_kubernetes_pod_node_name]
# separator: ;
# regex: ^(.*)$
# targetLabel: nodename
# replacement: $1
# action: replace

podTargetLabels: []

podDisruptionBudget:
enabled: false
# Set the minimum available replicas
# minAvailable: 1

netpol:
enabled: false
from: []
# - podSelector:
# matchLabels:
# app.kubernetes.io/name: prometheus
to: []

# Enable vertical pod autoscaler
verticalPodAutoscaler:
enabled: false

# Recommender responsible for generating recommendation for the object.
# List should be empty (then the default recommender will generate the recommendation)
# or contain exactly one recommender.
# recommenders:
# - name: custom-recommender-performance

# List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
controlledResources: []
# Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
# controlledValues: RequestsAndLimits

# Define the max allowed resources for the pod
maxAllowed: {}
# cpu: 200m
# memory: 100Mi
# Define the min allowed resources for the pod
minAllowed: {}
# cpu: 200m
# memory: 100Mi

updatePolicy:
# Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction
# minReplicas: 1
# Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
updateMode: Auto

volumeMounts: []

volumes: []

webhookUrl: ""
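On the `gomaxprocsOverride`/`gomemlimitOverride` pair above: the rendered manifests later in this diff otherwise derive `GOMAXPROCS` and `GOMEMLIMIT` from the container's resource limits via `resourceFieldRef`. A sketch of pinning them instead (values illustrative; formats per the Go runtime docs):

```yaml
reloader:
  deployment:
    gomaxprocsOverride: "2"        # fixed GOMAXPROCS; "0" lets the runtime ignore it
    gomemlimitOverride: "400MiB"   # fixed GOMEMLIMIT; "0" lets the runtime ignore it
```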
@@ -4,7 +4,5 @@ kind: Kustomization
resources:
- manifests/clusterrole.yaml
- manifests/clusterrolebinding.yaml
- manifests/role.yaml
- manifests/rolebinding.yaml
- manifests/serviceaccount.yaml
- manifests/deployment.yaml
- manifests/deployment.yaml
@@ -1,20 +1,9 @@
---
# Source: reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRole
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.117"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader-role
namespace: default
rules:
- apiGroups:
- ""
@@ -46,3 +35,23 @@ rules:
- get
- update
- patch
- apiGroups:
- "batch"
resources:
- cronjobs
verbs:
- list
- get
- apiGroups:
- "batch"
resources:
- jobs
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
@@ -1,20 +1,9 @@
---
# Source: reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRoleBinding
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.117"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@@ -3,18 +3,6 @@
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.117"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.117
name: reloader-reloader
namespace: default
spec:
@@ -23,44 +11,58 @@ spec:
selector:
matchLabels:
app: reloader-reloader
release: "reloader"
template:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.117"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.117
spec:
containers:
- image: "stakater/reloader:v0.0.117"
imagePullPolicy: IfNotPresent
name: reloader-reloader

ports:
- name: http
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
readinessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
securityContext:
- image: "ghcr.io/stakater/reloader:v1.2.0"
imagePullPolicy: IfNotPresent
name: reloader-reloader
env:
- name: GOMAXPROCS
valueFrom:
resourceFieldRef:
resource: limits.cpu
divisor: '1'
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
resource: limits.memory
divisor: '1'
ports:
- name: http
containerPort: 9090
livenessProbe:
httpGet:
path: /live
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
initialDelaySeconds: 10
readinessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
periodSeconds: 10
successThreshold: 1
initialDelaySeconds: 10
securityContext: {}
resources:
limits:
cpu: "1"
memory: 512Mi
requests:
cpu: 10m
memory: 512Mi
securityContext:
runAsNonRoot: true
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
serviceAccountName: reloader-reloader
@@ -1,3 +0,0 @@
---
# Source: reloader/templates/podmonitor.yaml

@@ -1,4 +0,0 @@
---
# Source: reloader/templates/role.yaml

@@ -1,4 +0,0 @@
---
# Source: reloader/templates/rolebinding.yaml

@@ -1,4 +0,0 @@
---
# Source: reloader/templates/service.yaml
@@ -3,14 +3,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.117"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader
namespace: default
@@ -1,4 +0,0 @@
---
# Source: reloader/templates/servicemonitor.yaml
@@ -1,109 +1,81 @@
---
# Source: reloader/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.117"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader
namespace: default
---
# Source: reloader/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRole
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.117"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader-role
namespace: default
rules:
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- list
- get
- watch
- apiGroups:
- "apps"
resources:
- deployments
- daemonsets
- statefulsets
verbs:
- list
- get
- update
- patch
- apiGroups:
- "extensions"
resources:
- deployments
- daemonsets
verbs:
- list
- get
- update
- patch
- apiGroups:
- ""
resources:
- secrets
- configmaps
verbs:
- list
- get
- watch
- apiGroups:
- apps
resources:
- deployments
- daemonsets
- statefulsets
verbs:
- list
- get
- update
- patch
- apiGroups:
- extensions
resources:
- deployments
- daemonsets
verbs:
- list
- get
- update
- patch
- apiGroups:
- batch
resources:
- cronjobs
verbs:
- list
- get
- apiGroups:
- batch
resources:
- jobs
verbs:
- create
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
# Source: reloader/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRoleBinding
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.117"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
name: reloader-reloader-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: reloader-reloader-role
subjects:
- kind: ServiceAccount
name: reloader-reloader
namespace: default
- kind: ServiceAccount
name: reloader-reloader
namespace: default
---
# Source: reloader/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
meta.helm.sh/release-namespace: "default"
meta.helm.sh/release-name: "reloader"
labels:
app: reloader-reloader
chart: "reloader-v0.0.117"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.117
name: reloader-reloader
namespace: default
spec:
@@ -112,44 +84,58 @@ spec:
selector:
matchLabels:
app: reloader-reloader
release: "reloader"
template:
metadata:
labels:
app: reloader-reloader
chart: "reloader-v0.0.117"
release: "reloader"
heritage: "Helm"
app.kubernetes.io/managed-by: "Helm"
group: com.stakater.platform
provider: stakater
version: v0.0.117
spec:
containers:
- image: "stakater/reloader:v0.0.117"
- env:
- name: GOMAXPROCS
valueFrom:
resourceFieldRef:
divisor: "1"
resource: limits.cpu
- name: GOMEMLIMIT
valueFrom:
resourceFieldRef:
divisor: "1"
resource: limits.memory
image: ghcr.io/stakater/reloader:v1.2.0
imagePullPolicy: IfNotPresent
name: reloader-reloader

ports:
- name: http
containerPort: 9090
livenessProbe:
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
httpGet:
path: /live
port: http
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 5
name: reloader-reloader
ports:
- containerPort: 9090
name: http
readinessProbe:
failureThreshold: 5
httpGet:
path: /metrics
port: http
timeoutSeconds: 5
failureThreshold: 5
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
securityContext:
timeoutSeconds: 5
resources:
limits:
cpu: "1"
memory: 512Mi
requests:
cpu: 10m
memory: 512Mi
securityContext: {}
securityContext:
runAsNonRoot: true
runAsUser: 65534
seccompProfile:
type: RuntimeDefault
serviceAccountName: reloader-reloader
@@ -23,6 +23,8 @@ reloader:
legacy:
rbac: false
matchLabels: {}
# Set to true to expose a prometheus counter of reloads by namespace (this metric may have high cardinality in clusters with many namespaces)
enableMetricsByNamespace: false
deployment:
replicas: 1
nodeSelector:
@@ -136,3 +138,5 @@ reloader:
# labels:
# Set timeout for scrape
# timeout: 10s

webhookUrl: ""
docs-nginx.conf (new file, 11 lines)
@@ -0,0 +1,11 @@
server {
    listen 8080;
    root /usr/share/nginx/html/;
    index index.html;
    error_page 403 404 /404.html;
    location = /404.html {
        internal;
    }
    # redirects issued by nginx will be relative
    absolute_redirect off;
}
docs/Alerting.md (new file, 18 lines)
@@ -0,0 +1,18 @@
# Alerting on Reload

Reloader can alert when it triggers a rolling upgrade on Deployments or StatefulSets. A webhook notification alert is sent to the configured webhook server with all the required information.

## Enabling

In order to enable this feature, update the `reloader.env.secret` section of `values.yaml`, providing the information needed for the alert:

```yaml
ALERT_ON_RELOAD: [ true/false ] Default: false
ALERT_SINK: [ slack/teams/webhook ] Default: webhook
ALERT_WEBHOOK_URL: Required if ALERT_ON_RELOAD is true
ALERT_ADDITIONAL_INFO: Any additional information to be added to alert
```

## Slack Incoming-Webhook Creation Docs

[Sending messages using Incoming Webhooks](https://api.slack.com/messaging/webhooks)
@@ -1,41 +1,53 @@

# Container Build
> **WARNING:** As a user of Reloader there is no need to build containers, these are freely available here: https://hub.docker.com/r/stakater/reloader/

Multi-architecture approach is based on original work by @mdh02038: https://github.com/mdh02038/Reloader
> **WARNING:** As a user of Reloader there is no need to build containers, the open source version is available on [Docker Hub](https://hub.docker.com/r/stakater/reloader/).

Images tested on linux/arm, linux/arm64 and linux/amd64.
Multi-architecture approach is based on original work by [@mdh02038](https://github.com/mdh02038/Reloader).

# Install Pre-Reqs
The build environment requires the following packages (tested on Ubuntu 20.04):
* golang
* make
* qemu (for arm, arm64 etc. emulation)
Images are tested on linux/arm, linux/arm64 and linux/amd64.

## Install Pre-Reqs

The build environment requires the following packages (tested on `Ubuntu 20.04`):

* Golang
* `make`
* `qemu` (for arm, arm64 etc. emulation)
* binfmt-support
* Docker engine

## Docker
Follow instructions here: https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository

Follow instructions on [Install using the apt repository](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository).

Once installed, enable the experimental CLI:
```

```bash
export DOCKER_CLI_EXPERIMENTAL=enabled
```
Login, to enable publishing of packages:
```

Login to enable publishing of packages:

```bash
sudo docker login
```

## Remaining Pre-Reqs

Remaining Pre-Reqs can be installed via:
```

```bash
sudo apt install golang make qemu-user-static binfmt-support -y
```

# Publish Multi-Architecture Image
## Publish Multi-Architecture Image

To build/ publish multi-arch Docker images clone repository and execute from repository root:
```

```bash
sudo make release-all
```

# Additional Links/ Info
* *https://medium.com/@artur.klauser/building-multi-architecture-docker-images-with-buildx-27d80f7e2408
## Additional Links/Info

[Building Multi-Architecture Docker Images With `Buildx`](https://medium.com/@artur.klauser/building-multi-architecture-docker-images-with-buildx-27d80f7e2408)
@@ -1,13 +1,14 @@
# Helm2 to Helm3 Migration

Follow below mentioned instructions to migrate reloader from Helm2 to Helm3
Follow below-mentioned instructions to migrate Reloader from Helm2 to Helm3

## Instrcutions:
## Instructions

There are 3 steps involved in migrating the reloader from Helm2 to Helm3.
There are 3 steps involved in migrating the Reloader from Helm2 to Helm3.

### Step 1:
Install the helm-2to3 plugin
### Step 1

Install the `helm-2to3` plugin

```bash
helm3 plugin install https://github.com/helm/helm-2to3
@@ -17,15 +18,18 @@ helm3 2to3 convert <release-name>
helm3 2to3 cleanup --release-cleanup --skip-confirmation
```

### Step 2:
Add the following Helm3 labels and annotations on reloader resources.
### Step 2

Add the following Helm3 labels and annotations on Reloader resources.

Label:

```yaml
app.kubernetes.io/managed-by=Helm
```

Annotations:

```yaml
meta.helm.sh/release-name=<release-name>
meta.helm.sh/release-namespace=<namespace>
@@ -51,12 +55,14 @@ kubectl annotate $KIND $NAME meta.helm.sh/release-namespace=$NAMESPACE
kubectl label $KIND $NAME app.kubernetes.io/managed-by=Helm
```

### Step 3:
### Step 3

Upgrade to desired version

```bash
helm3 repo add stakater https://stakater.github.io/stakater-charts

helm3 repo update

helm3 upgrade <release-name> stakater/reloader --version=v0.0.72
```
```
@@ -1,51 +1,49 @@
# How it works?
# How Does Reloader Work?

Reloader watches for `ConfigMap` and `Secret` and detects if there are changes in data of these objects. After change detection reloader performs rolling upgrade on relevant Pods via associated `Deployment`, `Daemonset` and `Statefulset`.
Reloader watches for `ConfigMap` and `Secret` and detects if there are changes in data of these objects. After change detection, Reloader performs a rolling upgrade on relevant Pods via the associated `Deployment`, `Daemonset` and `Statefulset`.

## How change detection works
## How Does Change Detection Work?

Reloader watches changes in `configmaps` and `secrets` data. As soon as it detects a change in these, it forwards these objects to an update handler which decides if and how to perform the rolling upgrade.

## Requirements for rolling upgrade
## Requirements for Rolling Upgrade

To perform a rolling upgrade, a `deployment`, `daemonset` or `statefulset` must have

- support for rolling upgrade strategy
- specific annotation for `configmaps` or `secrets`

The annotation value is comma separated list of `configmaps` or `secrets`. If a change is detected in data of these `configmaps` or `secrets`, reloader will perform rolling upgrades on their associated `deployments`, `daemonsets` or `statefulsets`.
The annotation value is a comma separated list of `configmaps` or `secrets`. If a change is detected in the data of these `configmaps` or `secrets`, Reloader will perform rolling upgrades on their associated `deployments`, `daemonsets` or `statefulsets`.

### Annotation for Configmap

For a `Deployment` called `foo` have a `ConfigMap` called `foo`. Then add this annotation* to your `Deployment`
For a `Deployment` called `foo` have a `ConfigMap` called `foo`. Then add this annotation to your `Deployment`, where the default annotation can be changed with the `--configmap-annotation` flag:

```yaml
metadata:
  annotations:
    configmap.reloader.stakater.com/reload: "foo"
```
<small>*the default annotation can be changed with the `--configmap-annotation` flag</small>

### Annotation for Secret

For a `Deployment` called `foo` have a `Secret` called `foo`. Then add this annotation* to your `Deployment`
For a `Deployment` called `foo` have a `Secret` called `foo`. Then add this annotation to your `Deployment`, where the default annotation can be changed with the `--secret-annotation` flag:

```yaml
metadata:
  annotations:
    secret.reloader.stakater.com/reload: "foo"
```
<small>*the default annotation can be changed with the `--secret-annotation` flag</small>

Above mentioned annotations also work for `Daemonsets`, `Statefulsets` and `Rollouts`

## How Rolling upgrade works?
## How Does Rolling Upgrade Work?

When reloader detects changes in configmap. It gets two objects of configmap. First object is an old configmap object which has a state before the latest change. Second object is new configmap object which contains latest changes. Reloader compares both objects and see whether any change in data occurred or not. If reloader finds any change in new configmap object, only then, it move forward with rolling upgrade.
When Reloader detects changes in a configmap, it gets two objects of that configmap. The first object is the old configmap object, which has the state before the latest change; the second is the new configmap object, which contains the latest changes. Reloader compares both objects to see whether any change in data occurred. Only if Reloader finds a change in the new configmap object does it move forward with the rolling upgrade.

After that, reloader gets the list of all deployments, daemonsets and statefulset and looks for above mentioned annotation for configmap. If the annotation value contains the configmap name, it then looks for an environment variable which can contain the configmap or secret data change hash.
After that, Reloader gets the list of all `deployments`, `daemonsets` and `statefulsets` and looks for the above-mentioned annotation for the configmap. If the annotation value contains the configmap name, it then looks for an environment variable which can contain the configmap or secret data change hash.

### Environment variable for Configmap
### Environment Variable for ConfigMap

If configmap name is foo then

@@ -53,7 +51,7 @@ If configmap name is foo then
STAKATER_FOO_CONFIGMAP
```

### Environment variable for Secret
### Environment Variable for Secret

If Secret name is foo then

@@ -61,22 +59,24 @@ If Secret name is foo then
STAKATER_FOO_SECRET
```

If the environment variable is found then it gets its value and compares it with new configmap hash value. If old value in environment variable is different from new hash value then reloader updates the environment variable. If the environment variable does not exist then it creates a new environment variable with latest hash value from configmap and updates the relevant `deployment`, `daemonset` or `statefulset`
If the environment variable is found, Reloader gets its value and compares it with the new configmap hash value. If the old value in the environment variable differs from the new hash value, Reloader updates the environment variable. If the environment variable does not exist, it creates a new one with the latest hash value from the configmap and updates the relevant `deployment`, `daemonset` or `statefulset`.

Note: Rolling upgrade also works in the same way for secrets.
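As a concrete sketch of this mechanism under the default env-vars reload strategy: after a reload, the patched `Deployment` carries a variable named by the convention above. The hash below is an illustrative SHA1, not a real value:

```yaml
spec:
  template:
    spec:
      containers:
        - name: foo
          env:
            - name: STAKATER_FOO_CONFIGMAP
              value: "ed59dd1a48d9d46493b584356ec00d07f478dd05"  # illustrative SHA1 of the configmap data
```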
### Hash value Computation
### Hash Value Computation

Reloader uses SHA1 to compute hash value. SHA1 is used because it is efficient and less prone to collision.

## Monitor All Namespaces

By default reloader deploys in default namespace and monitors changes in all namespaces. To monitor changes in a specific namespace deploy the reloader in that namespace and set the `watchGlobally` flag to `false` in values file located under `deployments/kubernetes/chart/reloader`
And render manifest file using helm command
By default Reloader deploys in the default namespace and monitors changes in all namespaces. To monitor changes in a specific namespace, deploy Reloader in that namespace and set the `watchGlobally` flag to `false` in the values file located under `deployments/kubernetes/chart/reloader`, then render the manifest file using the helm command:

```bash
helm --namespace {replace this with namespace name} template . > reloader.yaml
```
The output file can then be used to deploy reloader in specific namespace.

## Compatibility with helm install and upgrade
Reloader has no impact on helm deployment cycle. Reloader only injects an environment variable in `deployment`, `daemonset` or `statefulset`. The environment variable contains the SHA1 value of configmap's or secret's data. So if a deployment is created using Helm and Reloader updates the deployment, then next time you upgrade the helm release, reloader will do nothing except changing that environment variable value in `deployment` , `daemonset` or `statefulset`.
The output file can then be used to deploy Reloader in a specific namespace.

## Compatibility With Helm Install and Upgrade

Reloader has no impact on the helm deployment cycle. Reloader only injects an environment variable in the `deployment`, `daemonset` or `statefulset`. The environment variable contains the SHA1 value of the configmap's or secret's data. So if a deployment is created using Helm and Reloader updates the deployment, then the next time you upgrade the helm release, Reloader will do nothing except change that environment variable value in the `deployment`, `daemonset` or `statefulset`.
@@ -5,36 +5,42 @@ Reloader and k8s-trigger-controller are both built for same purpose. So there ar
## Similarities

- Both controllers support change detection in configmap and secrets
- Both controllers support deployment rollout
- Both controllers support deployment `rollout`
- Both controllers use SHA1 for hashing
- Both controllers have end to end as well as unit test cases.

## Differences

### Support for Daemonsets and Statefulsets.
### Support for `Daemonsets` and `Statefulsets`

#### k8s-trigger-controller:
k8s-trigger-controller only support for deployment rollout. It does not support daemonsets and statefulsets rollout.
#### `k8s-trigger-controller`

#### Reloader:
Reloader supports deployment rollout as well as daemonsets and statefulsets rollout.
`k8s-trigger-controller` only supports deployment `rollout`. It does not support `daemonsets` and `statefulsets` `rollout`.

### Hashing usage
#### Reloader

#### k8s-trigger-controller:
k8s-trigger-controller stores the hash value in an annotation `trigger.k8s.io/[secret|configMap]-NAME-last-hash`
Reloader supports deployment `rollout` as well as `daemonsets` and `statefulsets` `rollout`.

### Hashing Usage

#### `k8s-trigger-controller`

`k8s-trigger-controller` stores the hash value in an annotation `trigger.k8s.io/[secret|configMap]-NAME-last-hash`

#### Reloader

#### Reloader:
Reloader stores the hash value in an environment variable `STAKATER_NAME_[SECRET|CONFIGMAP]`

### Customization

#### k8s-trigger-controller:
k8s-trigger-controller restricts you to using the `trigger.k8s.io/[secret-configMap]-NAME-last-hash` annotation
#### `k8s-trigger-controller`

`k8s-trigger-controller` restricts you to using the `trigger.k8s.io/[secret-configMap]-NAME-last-hash` annotation

#### Reloader

#### Reloader:
Reloader allows you to customize the annotation to fit your needs with command line flags:

- `--auto-annotation <annotation>`
- `--configmap-annotation <annotation>`
- `--secret-annotation <annotation>`
- `--secret-annotation <annotation>`
@@ -1,11 +1,14 @@
Below are the steps to use reloader with Sealed Secrets.
1. Download and install the kubeseal client from [here](https://github.com/bitnami-labs/sealed-secrets).
2. Install the controller for sealed secrets
3. Fetch the encryption certificate
4. Encrypt the secret.
5. Apply the secret.
7. Install the tool which uses that sealed secret.
8. Install Reloader.
9. Once everything is setup, update the original secret at client and encrypt it with kubeseal to see reloader working.
10. Apply the updated sealed secret.
11. Reloader will restart the pod to use that updated secret.
# Using Reloader with Sealed Secrets

Below are the steps to use Reloader with Sealed Secrets:

1. Download and install the kubeseal client from [here](https://github.com/bitnami-labs/sealed-secrets)
1. Install the controller for sealed secrets
1. Fetch the encryption certificate
1. Encrypt the secret
1. Apply the secret
1. Install the tool which uses that sealed secret
1. Install Reloader
1. Once everything is setup, update the original secret at client and encrypt it with kubeseal to see Reloader working
1. Apply the updated sealed secret
1. Reloader will restart the pod to use that updated secret
|
||||
@@ -2,9 +2,9 @@

Reloader's working can be verified in three ways.

## Verify from logs
## Verify From Logs

Check the logs of reloader and verify that you can see logs looks like below, if you are able to find these logs then it means reloader is working.
Check the logs of Reloader and verify that you can see logs like the ones below; if you can find these logs, it means Reloader is working.

```text
Changes Detected in test-object of type 'SECRET' in namespace: test-reloader
```

@@ -14,48 +14,62 @@ Updated test-resource of type Deployment in namespace: test-reloader

Below are the details that explain these logs:

### test-object
### `test-object`

`test-object` is the name of a `secret` or a `deployment` in which change has been detected.
`test-object` is the name of a `secret` or a `configmap` in which a change has been detected.

### SECRET
### `SECRET`

`SECRET` is the type of `test-object`. It can either be `SECRET` or `CONFIGMAP`.

### test-reloader
### `test-reloader`

`test-reloader` is the name of namespace in which reloader has detected the change.
`test-reloader` is the name of the namespace in which Reloader has detected the change.

### test-resource
### `test-resource`

`test-resource` is the name of the resource which is going to be updated.

### Deployment
### `Deployment`

`Deployment` is the type of `test-resource`. It can either be a `Deployment`, `Daemonset` or `Statefulset`.

## Verify by checking the age of Pod
## Verify by Checking the Age of Pod

A pod's age can tell whether reloader is working correctly or not. If you know that a change in a `secret` or `configmap` has occurred, then check the relevant Pod's age immediately. It should be newly created few moments ago.
A pod's age can tell whether Reloader is working correctly or not. If you know that a change in a `secret` or `configmap` has occurred, then check the relevant Pod's age immediately. It should have been created only a few moments ago.
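One way to eyeball pod age from the command line is to sort pods by creation time; a sketch using standard `kubectl` flags:

```bash
# Newest pods are listed last; a just-reloaded pod should be seconds old
kubectl get pods -n <namespace name> --sort-by=.metadata.creationTimestamp
```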
### Verify from kubernetes Dashboard
### Verify from Kubernetes Dashboard

The `kubernetes dashboard` can be used to verify the working of Reloader. After a change in a `secret` or `configmap`, check the relevant Pod's age from the dashboard. It should have been created only a few moments ago.

### Verify from command line
### Verify from Command Line

After a change in `secret` or `configmap`. Run the below mentioned command and verify that the pod is newly created.
After a change in a `secret` or `configmap`, run the below-mentioned command and verify that the pod is newly created.

```bash
kubectl get pods <pod name> -n <namespace name>
```

## Verify from metrics
Some metrics are exported to prometheus endpoint `/metrics` on port `9090`.
## Verify From Metrics

When reloader is unable to reload, `reloader_reload_executed_total{success="false"}` metric gets incremented and when it reloads successfully, `reloader_reload_executed_total{success="true"}` gets incremented. You will be able to see the following metrics, with some other metrics, at `/metrics` endpoint.
Some metrics are exported to the Prometheus endpoint `/metrics` on port `9090`.

When Reloader is unable to reload, the `reloader_reload_executed_total{success="false"}` metric is incremented, and when it reloads successfully, `reloader_reload_executed_total{success="true"}` is incremented. You will be able to see the following metrics, along with some others, at the `/metrics` endpoint.

```text
reloader_reload_executed_total{success="false"} 15
reloader_reload_executed_total{success="true"} 12
```

### Reloads by Namespace

Reloader can also export a metric to show the number of reloads by namespace. This feature is disabled by default, as it can lead to high cardinality in clusters with many namespaces.

The metric will have both `success` and `namespace` as attributes:

```text
reloader_reload_executed_total{success="false", namespace="some-namespace"} 2
reloader_reload_executed_total{success="true", namespace="some-namespace"} 1
```

To opt in, set the environment variable `METRICS_COUNT_BY_NAMESPACE` to `enabled` or set the Helm value `reloader.enableMetricsByNamespace` to `true`.
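A quick way to check these counters is to port-forward to Reloader and scrape the endpoint; a sketch, where the deployment name is a typical Helm-generated name and may differ in your cluster:

```bash
# Enable per-namespace counts (either of the two options described above)
export METRICS_COUNT_BY_NAMESPACE=enabled

# Scrape the metrics endpoint locally
kubectl port-forward deploy/reloader-reloader 9090:9090 &
curl -s localhost:9090/metrics | grep reloader_reload_executed_total
```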
@@ -1,8 +0,0 @@
# Features

These are the key features of Reloader:

1. Restart pod in a deployment on change in linked/related configmap's or secret's
2. Restart pod in a daemonset on change in linked/related configmap's or secret's
3. Restart pod in a statefulset on change in linked/related configmap's or secret's
4. Restart pod in a rollout on change in linked/related configmap's or secret's

docs/index.md (new file, 12 lines)

@@ -0,0 +1,12 @@
# Introduction

Reloader can watch changes in `ConfigMap` and `Secret` and do rolling upgrades on Pods with their associated `DeploymentConfigs`, `Deployments`, `Daemonsets`, `Statefulsets` and `Rollouts`.

These are the key features of Reloader:

1. Restart pod in a `deployment` on change in linked/related configmaps or secrets
1. Restart pod in a `daemonset` on change in linked/related configmaps or secrets
1. Restart pod in a `statefulset` on change in linked/related configmaps or secrets
1. Restart pod in a `rollout` on change in linked/related configmaps or secrets

This site contains more details on how Reloader works. For an overview, please see the repository's [README file](https://github.com/stakater/Reloader/blob/master/README.md). A minimal opt-in sketch follows.
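To opt a workload in to these restarts, the usual route is the auto annotation whose default value appears in the command flags later in this diff; a sketch with a hypothetical deployment name:

```bash
# Restart pods of my-app whenever a ConfigMap or Secret it references changes
kubectl annotate deployment my-app reloader.stakater.com/auto="true"
```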
go.mod (151 lines)

@@ -1,98 +1,93 @@
module github.com/stakater/Reloader

go 1.18
go 1.23.1

require (
    github.com/argoproj/argo-rollouts v1.0.2
    github.com/openshift/api v0.0.0-20210527122704-efd9d5958e01
    github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142
    github.com/prometheus/client_golang v1.10.0
    github.com/sirupsen/logrus v1.7.0
    github.com/spf13/cobra v1.1.3
    k8s.io/api v0.21.2
    k8s.io/apimachinery v0.21.2
    k8s.io/client-go v0.21.2
    github.com/argoproj/argo-rollouts v1.7.2
    github.com/openshift/api v0.0.0-20240131175612-92fe66c75e8f
    github.com/openshift/client-go v0.0.0-20231110140829-a6ca51f6d5ba
    github.com/parnurzeal/gorequest v0.3.0
    github.com/prometheus/client_golang v1.20.5
    github.com/sirupsen/logrus v1.9.3
    github.com/spf13/cobra v1.8.1
    k8s.io/api v0.29.3
    k8s.io/apimachinery v0.29.3
    k8s.io/client-go v0.29.3
    k8s.io/kubectl v0.29.3
    k8s.io/utils v0.0.0-20240423183400-0849a56e8f22
)

require (
    github.com/PuerkitoBio/purell v1.1.1 // indirect
    github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/cespare/xxhash/v2 v2.1.1 // indirect
    github.com/cespare/xxhash/v2 v2.3.0 // indirect
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/emicklei/go-restful v2.9.5+incompatible // indirect
    github.com/evanphx/json-patch v4.9.0+incompatible // indirect
    github.com/go-logr/logr v0.4.0 // indirect
    github.com/go-openapi/jsonpointer v0.19.3 // indirect
    github.com/go-openapi/jsonreference v0.19.3 // indirect
    github.com/go-openapi/spec v0.19.3 // indirect
    github.com/go-openapi/swag v0.19.5 // indirect
    github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 // indirect
    github.com/emicklei/go-restful/v3 v3.11.0 // indirect
    github.com/evanphx/json-patch v5.6.0+incompatible // indirect
    github.com/go-logr/logr v1.3.0 // indirect
    github.com/go-openapi/jsonpointer v0.19.6 // indirect
    github.com/go-openapi/jsonreference v0.20.2 // indirect
    github.com/go-openapi/swag v0.22.3 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang/protobuf v1.4.3 // indirect
    github.com/google/go-cmp v0.5.4 // indirect
    github.com/google/gofuzz v1.1.0 // indirect
    github.com/googleapis/gnostic v0.4.1 // indirect
    github.com/hashicorp/golang-lru v0.5.4 // indirect
    github.com/imdario/mergo v0.3.11 // indirect
    github.com/inconshreveable/mousetrap v1.0.0 // indirect
    github.com/json-iterator/go v1.1.10 // indirect
    github.com/mailru/easyjson v0.7.0 // indirect
    github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
    github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
    github.com/golang/protobuf v1.5.4 // indirect
    github.com/google/gnostic-models v0.6.8 // indirect
    github.com/google/go-cmp v0.6.0 // indirect
    github.com/google/gofuzz v1.2.0 // indirect
    github.com/google/uuid v1.6.0 // indirect
    github.com/imdario/mergo v0.3.13 // indirect
    github.com/inconshreveable/mousetrap v1.1.0 // indirect
    github.com/josharian/intern v1.0.0 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
    github.com/klauspost/compress v1.17.9 // indirect
    github.com/kylelemons/godebug v1.1.0 // indirect
    github.com/mailru/easyjson v0.7.7 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.1 // indirect
    github.com/onsi/ginkgo v1.15.1 // indirect
    github.com/onsi/gomega v1.11.0 // indirect
    github.com/modern-go/reflect2 v1.0.2 // indirect
    github.com/moul/http2curl v1.0.0 // indirect
    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
    github.com/pkg/errors v0.9.1 // indirect
    github.com/prometheus/client_model v0.2.0 // indirect
    github.com/prometheus/common v0.21.0 // indirect
    github.com/prometheus/procfs v0.6.0 // indirect
    github.com/prometheus/client_model v0.6.1 // indirect
    github.com/prometheus/common v0.55.0 // indirect
    github.com/prometheus/procfs v0.15.1 // indirect
    github.com/smartystreets/goconvey v1.7.2 // indirect
    github.com/spf13/pflag v1.0.5 // indirect
    golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f // indirect
    golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb // indirect
    golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
    golang.org/x/sys v0.0.0-20210309074719-68d13333faf2 // indirect
    golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 // indirect
    golang.org/x/text v0.3.4 // indirect
    golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
    google.golang.org/appengine v1.6.6 // indirect
    google.golang.org/protobuf v1.25.0 // indirect
    golang.org/x/net v0.26.0 // indirect
    golang.org/x/oauth2 v0.21.0 // indirect
    golang.org/x/sys v0.22.0 // indirect
    golang.org/x/term v0.21.0 // indirect
    golang.org/x/text v0.16.0 // indirect
    golang.org/x/time v0.3.0 // indirect
    google.golang.org/protobuf v1.34.2 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
    gopkg.in/yaml.v2 v2.4.0 // indirect
    k8s.io/klog/v2 v2.8.0 // indirect
    k8s.io/kube-openapi v0.0.0-20210216185858-15cd8face8d6 // indirect
    k8s.io/utils v0.0.0-20201110183641-67b214c5f920 // indirect
    sigs.k8s.io/structured-merge-diff/v4 v4.0.2 // indirect
    sigs.k8s.io/yaml v1.2.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
    k8s.io/klog/v2 v2.110.1 // indirect
    k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
    sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
    sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
    sigs.k8s.io/yaml v1.4.0 // indirect
)

// Replacements for argo-rollouts
replace (
    github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127
    github.com/grpc-ecosystem/grpc-gateway => github.com/grpc-ecosystem/grpc-gateway v1.16.0
    k8s.io/api => k8s.io/api v0.20.4
    k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.4
    k8s.io/apimachinery => k8s.io/apimachinery v0.21.0-alpha.0
    k8s.io/apiserver => k8s.io/apiserver v0.20.4
    k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.4
    k8s.io/client-go => k8s.io/client-go v0.20.4
    k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.4
    k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.4
    k8s.io/code-generator => k8s.io/code-generator v0.20.5-rc.0
    k8s.io/component-base => k8s.io/component-base v0.20.4
    k8s.io/component-helpers => k8s.io/component-helpers v0.20.4
    k8s.io/controller-manager => k8s.io/controller-manager v0.20.4
    k8s.io/cri-api => k8s.io/cri-api v0.20.5-rc.0
    k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.4
    k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.4
    k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.4
    k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.4
    k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.4
    k8s.io/kubectl => k8s.io/kubectl v0.20.4
    k8s.io/kubelet => k8s.io/kubelet v0.20.4
    k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.4
    k8s.io/metrics => k8s.io/metrics v0.20.4
    k8s.io/mount-utils => k8s.io/mount-utils v0.20.5-rc.0
    k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.4
    k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.20.4
    k8s.io/sample-controller => k8s.io/sample-controller v0.20.4
    github.com/go-check/check => github.com/go-check/check v0.0.0-20201130134442-10cb98267c6c
    k8s.io/api v0.0.0 => k8s.io/api v0.28.4
    k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.28.4
    k8s.io/client-go v0.0.0 => k8s.io/client-go v0.27.4
    k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider v0.24.2
    k8s.io/controller-manager v0.0.0 => k8s.io/controller-manager v0.24.2
    k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.20.5-rc.0
    k8s.io/csi-translation-lib v0.0.0 => k8s.io/csi-translation-lib v0.24.2
    k8s.io/kube-aggregator v0.0.0 => k8s.io/kube-aggregator v0.24.2
    k8s.io/kube-controller-manager v0.0.0 => k8s.io/kube-controller-manager v0.24.2
    k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.24.2
    k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.24.2
    k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.27.1
    k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.24.2
    k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.24.2
    k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.20.5-rc.0
    k8s.io/sample-apiserver v0.0.0 => k8s.io/sample-apiserver v0.24.2
    k8s.io/sample-cli-plugin v0.0.0 => k8s.io/sample-cli-plugin v0.24.2
    k8s.io/sample-controller v0.0.0 => k8s.io/sample-controller v0.24.2
)
internal/pkg/alerts/alert.go (new file, 119 lines)

@@ -0,0 +1,119 @@
package alert

import (
    "fmt"
    "os"
    "strings"

    "github.com/parnurzeal/gorequest"
    "github.com/sirupsen/logrus"
)

// SendWebhookAlert sends an alert message to the configured webhook service
func SendWebhookAlert(msg string) {
    webhook_url, ok := os.LookupEnv("ALERT_WEBHOOK_URL")
    if !ok {
        logrus.Error("ALERT_WEBHOOK_URL env variable not provided")
        return
    }
    webhook_url = strings.TrimSpace(webhook_url)
    alert_sink := os.Getenv("ALERT_SINK")
    alert_sink = strings.ToLower(strings.TrimSpace(alert_sink))

    // Provision to add a proxy to reach the webhook server, if required
    webhook_proxy := os.Getenv("ALERT_WEBHOOK_PROXY")
    webhook_proxy = strings.TrimSpace(webhook_proxy)

    // Provision to add additional information to the alert, e.g. a cluster name
    alert_additional_info, ok := os.LookupEnv("ALERT_ADDITIONAL_INFO")
    if ok {
        alert_additional_info = strings.TrimSpace(alert_additional_info)
        msg = fmt.Sprintf("%s : %s", alert_additional_info, msg)
    }

    if alert_sink == "slack" {
        sendSlackAlert(webhook_url, webhook_proxy, msg)
    } else if alert_sink == "teams" {
        sendTeamsAlert(webhook_url, webhook_proxy, msg)
    } else {
        msg = strings.Replace(msg, "*", "", -1)
        sendRawWebhookAlert(webhook_url, webhook_proxy, msg)
    }
}

// redirectPolicy handles server redirection by treating any redirect as an error
func redirectPolicy(req gorequest.Request, via []gorequest.Request) error {
    return fmt.Errorf("incorrect token (redirection)")
}

// sendSlackAlert sends an alert to a Slack webhook
func sendSlackAlert(webhookUrl string, proxy string, msg string) []error {
    attachment := Attachment{
        Text:       msg,
        Color:      "good",
        AuthorName: "Reloader",
    }

    payload := WebhookMessage{
        Attachments: []Attachment{attachment},
    }

    request := gorequest.New().Proxy(proxy)
    resp, _, err := request.
        Post(webhookUrl).
        RedirectPolicy(redirectPolicy).
        Send(payload).
        End()

    if err != nil {
        return err
    }
    if resp.StatusCode >= 400 {
        return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)}
    }

    return nil
}

// sendTeamsAlert sends an alert to a Microsoft Teams webhook
func sendTeamsAlert(webhookUrl string, proxy string, msg string) []error {
    attachment := Attachment{
        Text: msg,
    }

    request := gorequest.New().Proxy(proxy)
    resp, _, err := request.
        Post(webhookUrl).
        RedirectPolicy(redirectPolicy).
        Send(attachment).
        End()

    if err != nil {
        return err
    }
    if resp.StatusCode != 200 {
        return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)}
    }

    return nil
}

// sendRawWebhookAlert sends an alert to a webhook service as plain text
func sendRawWebhookAlert(webhookUrl string, proxy string, msg string) []error {
    request := gorequest.New().Proxy(proxy)
    resp, _, err := request.
        Post(webhookUrl).
        Type("text").
        RedirectPolicy(redirectPolicy).
        Send(msg).
        End()

    if err != nil {
        return err
    }
    if resp.StatusCode >= 400 {
        return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)}
    }

    return nil
}
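The environment variables read by `SendWebhookAlert` can be set on the Reloader container; a sketch with placeholder values:

```bash
# Required: the webhook that alerts are POSTed to
export ALERT_WEBHOOK_URL="https://hooks.slack.com/services/XXX/YYY/ZZZ"
# Optional: "slack", "teams", or anything else for a raw text POST
export ALERT_SINK="slack"
# Optional: proxy used to reach the webhook server
export ALERT_WEBHOOK_PROXY="http://proxy.internal:3128"
# Optional: prefix added to every message, e.g. a cluster name
export ALERT_ADDITIONAL_INFO="prod-cluster-1"
```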
internal/pkg/alerts/slack_alert.go (new file, 61 lines)

@@ -0,0 +1,61 @@
package alert

type WebhookMessage struct {
    Username        string       `json:"username,omitempty"`
    IconEmoji       string       `json:"icon_emoji,omitempty"`
    IconURL         string       `json:"icon_url,omitempty"`
    Channel         string       `json:"channel,omitempty"`
    ThreadTimestamp string       `json:"thread_ts,omitempty"`
    Text            string       `json:"text,omitempty"`
    Attachments     []Attachment `json:"attachments,omitempty"`
    Parse           string       `json:"parse,omitempty"`
    ResponseType    string       `json:"response_type,omitempty"`
    ReplaceOriginal bool         `json:"replace_original,omitempty"`
    DeleteOriginal  bool         `json:"delete_original,omitempty"`
    ReplyBroadcast  bool         `json:"reply_broadcast,omitempty"`
}

type Attachment struct {
    Color    string `json:"color,omitempty"`
    Fallback string `json:"fallback,omitempty"`

    CallbackID string `json:"callback_id,omitempty"`
    ID         int    `json:"id,omitempty"`

    AuthorID      string `json:"author_id,omitempty"`
    AuthorName    string `json:"author_name,omitempty"`
    AuthorSubname string `json:"author_subname,omitempty"`
    AuthorLink    string `json:"author_link,omitempty"`
    AuthorIcon    string `json:"author_icon,omitempty"`

    Title     string `json:"title,omitempty"`
    TitleLink string `json:"title_link,omitempty"`
    Pretext   string `json:"pretext,omitempty"`
    Text      string `json:"text,omitempty"`

    ImageURL string `json:"image_url,omitempty"`
    ThumbURL string `json:"thumb_url,omitempty"`

    ServiceName string `json:"service_name,omitempty"`
    ServiceIcon string `json:"service_icon,omitempty"`
    FromURL     string `json:"from_url,omitempty"`
    OriginalURL string `json:"original_url,omitempty"`

    MarkdownIn []string `json:"mrkdwn_in,omitempty"`

    Footer     string `json:"footer,omitempty"`
    FooterIcon string `json:"footer_icon,omitempty"`
}

type Field struct {
    Title string `json:"title"`
    Value string `json:"value"`
    Short bool   `json:"short"`
}

type Action struct {
    Type  string `json:"type"`
    Text  string `json:"text"`
    Url   string `json:"url"`
    Style string `json:"style"`
}
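For reference, a `WebhookMessage` carrying one `Attachment` (as built by `sendSlackAlert`) marshals to JSON along these lines; this curl sketch mirrors that request with a placeholder URL and message:

```bash
curl -X POST "$ALERT_WEBHOOK_URL" \
  -H 'Content-Type: application/json' \
  -d '{"attachments":[{"color":"good","author_name":"Reloader","text":"Changes Detected in test-object"}]}'
```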
@@ -2,39 +2,44 @@ package callbacks

import (
    "context"
    "time"
    "fmt"

    "github.com/sirupsen/logrus"
    "github.com/stakater/Reloader/internal/pkg/util"
    "github.com/stakater/Reloader/pkg/kube"
    appsv1 "k8s.io/api/apps/v1"
    batchv1 "k8s.io/api/batch/v1"
    v1 "k8s.io/api/core/v1"
    meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    patchtypes "k8s.io/apimachinery/pkg/types"

    argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
    openshiftv1 "github.com/openshift/api/apps/v1"
)

//ItemsFunc is a generic function to return a specific resource array in given namespace
type ItemsFunc func(kube.Clients, string) []interface{}
// ItemsFunc is a generic function to return a specific resource array in given namespace
type ItemsFunc func(kube.Clients, string) []runtime.Object

//ContainersFunc is a generic func to return containers
type ContainersFunc func(interface{}) []v1.Container
// ContainersFunc is a generic func to return containers
type ContainersFunc func(runtime.Object) []v1.Container

//InitContainersFunc is a generic func to return containers
type InitContainersFunc func(interface{}) []v1.Container
// InitContainersFunc is a generic func to return containers
type InitContainersFunc func(runtime.Object) []v1.Container

//VolumesFunc is a generic func to return volumes
type VolumesFunc func(interface{}) []v1.Volume
// VolumesFunc is a generic func to return volumes
type VolumesFunc func(runtime.Object) []v1.Volume

//UpdateFunc performs the resource update
type UpdateFunc func(kube.Clients, string, interface{}) error
// UpdateFunc performs the resource update
type UpdateFunc func(kube.Clients, string, runtime.Object) error

//AnnotationsFunc is a generic func to return annotations
type AnnotationsFunc func(interface{}) map[string]string
// AnnotationsFunc is a generic func to return annotations
type AnnotationsFunc func(runtime.Object) map[string]string

//PodAnnotationsFunc is a generic func to return annotations
type PodAnnotationsFunc func(interface{}) map[string]string
// PodAnnotationsFunc is a generic func to return annotations
type PodAnnotationsFunc func(runtime.Object) map[string]string

//RollingUpgradeFuncs contains generic functions to perform rolling upgrade
// RollingUpgradeFuncs contains generic functions to perform rolling upgrade
type RollingUpgradeFuncs struct {
    ItemsFunc       ItemsFunc
    AnnotationsFunc AnnotationsFunc
@@ -47,250 +52,317 @@ type RollingUpgradeFuncs struct {
}

// GetDeploymentItems returns the deployments in given namespace
func GetDeploymentItems(clients kube.Clients, namespace string) []interface{} {
func GetDeploymentItems(clients kube.Clients, namespace string) []runtime.Object {
    deployments, err := clients.KubernetesClient.AppsV1().Deployments(namespace).List(context.TODO(), meta_v1.ListOptions{})
    if err != nil {
        logrus.Errorf("Failed to list deployments %v", err)
    }

    items := make([]runtime.Object, len(deployments.Items))
    // Ensure we always have pod annotations to add to
    for i, v := range deployments.Items {
        if v.Spec.Template.ObjectMeta.Annotations == nil {
            annotations := make(map[string]string)
            deployments.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
        }
        items[i] = &deployments.Items[i]
    }

    return util.InterfaceSlice(deployments.Items)
    return items
}

// GetCronJobItems returns the jobs in given namespace
func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object {
    cronjobs, err := clients.KubernetesClient.BatchV1().CronJobs(namespace).List(context.TODO(), meta_v1.ListOptions{})
    if err != nil {
        logrus.Errorf("Failed to list cronjobs %v", err)
    }

    items := make([]runtime.Object, len(cronjobs.Items))
    // Ensure we always have pod annotations to add to
    for i, v := range cronjobs.Items {
        if v.Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations == nil {
            annotations := make(map[string]string)
            cronjobs.Items[i].Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations = annotations
        }
        items[i] = &cronjobs.Items[i]
    }

    return items
}

// GetDaemonSetItems returns the daemonSets in given namespace
func GetDaemonSetItems(clients kube.Clients, namespace string) []interface{} {
func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object {
    daemonSets, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
    if err != nil {
        logrus.Errorf("Failed to list daemonSets %v", err)
    }

    items := make([]runtime.Object, len(daemonSets.Items))
    // Ensure we always have pod annotations to add to
    for i, v := range daemonSets.Items {
        if v.Spec.Template.ObjectMeta.Annotations == nil {
            daemonSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
        }
        items[i] = &daemonSets.Items[i]
    }

    return util.InterfaceSlice(daemonSets.Items)
    return items
}

// GetStatefulSetItems returns the statefulSets in given namespace
func GetStatefulSetItems(clients kube.Clients, namespace string) []interface{} {
func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Object {
    statefulSets, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).List(context.TODO(), meta_v1.ListOptions{})
    if err != nil {
        logrus.Errorf("Failed to list statefulSets %v", err)
    }

    items := make([]runtime.Object, len(statefulSets.Items))
    // Ensure we always have pod annotations to add to
    for i, v := range statefulSets.Items {
        if v.Spec.Template.ObjectMeta.Annotations == nil {
            statefulSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
        }
        items[i] = &statefulSets.Items[i]
    }

    return util.InterfaceSlice(statefulSets.Items)
    return items
}

// GetDeploymentConfigItems returns the deploymentConfigs in given namespace
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []interface{} {
func GetDeploymentConfigItems(clients kube.Clients, namespace string) []runtime.Object {
    deploymentConfigs, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).List(context.TODO(), meta_v1.ListOptions{})
    if err != nil {
        logrus.Errorf("Failed to list deploymentConfigs %v", err)
    }

    items := make([]runtime.Object, len(deploymentConfigs.Items))
    // Ensure we always have pod annotations to add to
    for i, v := range deploymentConfigs.Items {
        if v.Spec.Template.ObjectMeta.Annotations == nil {
            deploymentConfigs.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
        }
        items[i] = &deploymentConfigs.Items[i]
    }

    return util.InterfaceSlice(deploymentConfigs.Items)
    return items
}

// GetRolloutItems returns the rollouts in given namespace
func GetRolloutItems(clients kube.Clients, namespace string) []interface{} {
func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object {
    rollouts, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).List(context.TODO(), meta_v1.ListOptions{})
    if err != nil {
        logrus.Errorf("Failed to list Rollouts %v", err)
    }

    items := make([]runtime.Object, len(rollouts.Items))
    // Ensure we always have pod annotations to add to
    for i, v := range rollouts.Items {
        if v.Spec.Template.ObjectMeta.Annotations == nil {
            rollouts.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
        }
        items[i] = &rollouts.Items[i]
    }

    return util.InterfaceSlice(rollouts.Items)
    return items
}

// GetDeploymentAnnotations returns the annotations of given deployment
func GetDeploymentAnnotations(item interface{}) map[string]string {
    return item.(appsv1.Deployment).ObjectMeta.Annotations
func GetDeploymentAnnotations(item runtime.Object) map[string]string {
    return item.(*appsv1.Deployment).ObjectMeta.Annotations
}

// GetCronJobAnnotations returns the annotations of given cronjob
func GetCronJobAnnotations(item runtime.Object) map[string]string {
    return item.(*batchv1.CronJob).ObjectMeta.Annotations
}

// GetDaemonSetAnnotations returns the annotations of given daemonSet
func GetDaemonSetAnnotations(item interface{}) map[string]string {
    return item.(appsv1.DaemonSet).ObjectMeta.Annotations
func GetDaemonSetAnnotations(item runtime.Object) map[string]string {
    return item.(*appsv1.DaemonSet).ObjectMeta.Annotations
}

// GetStatefulSetAnnotations returns the annotations of given statefulSet
func GetStatefulSetAnnotations(item interface{}) map[string]string {
    return item.(appsv1.StatefulSet).ObjectMeta.Annotations
func GetStatefulSetAnnotations(item runtime.Object) map[string]string {
    return item.(*appsv1.StatefulSet).ObjectMeta.Annotations
}

// GetDeploymentConfigAnnotations returns the annotations of given deploymentConfig
func GetDeploymentConfigAnnotations(item interface{}) map[string]string {
    return item.(openshiftv1.DeploymentConfig).ObjectMeta.Annotations
func GetDeploymentConfigAnnotations(item runtime.Object) map[string]string {
    return item.(*openshiftv1.DeploymentConfig).ObjectMeta.Annotations
}

// GetRolloutAnnotations returns the annotations of given rollout
func GetRolloutAnnotations(item interface{}) map[string]string {
    return item.(argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
func GetRolloutAnnotations(item runtime.Object) map[string]string {
    return item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
}

// GetDeploymentPodAnnotations returns the pod's annotations of given deployment
func GetDeploymentPodAnnotations(item interface{}) map[string]string {
    return item.(appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
func GetDeploymentPodAnnotations(item runtime.Object) map[string]string {
    return item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
}

// GetCronJobPodAnnotations returns the pod's annotations of given cronjob
func GetCronJobPodAnnotations(item runtime.Object) map[string]string {
    return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations
}

// GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
func GetDaemonSetPodAnnotations(item interface{}) map[string]string {
    return item.(appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string {
    return item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
}

// GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet
func GetStatefulSetPodAnnotations(item interface{}) map[string]string {
    return item.(appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
func GetStatefulSetPodAnnotations(item runtime.Object) map[string]string {
    return item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
}

// GetDeploymentConfigPodAnnotations returns the pod's annotations of given deploymentConfig
func GetDeploymentConfigPodAnnotations(item interface{}) map[string]string {
    return item.(openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
func GetDeploymentConfigPodAnnotations(item runtime.Object) map[string]string {
    return item.(*openshiftv1.DeploymentConfig).Spec.Template.ObjectMeta.Annotations
}

// GetRolloutPodAnnotations returns the pod's annotations of given rollout
func GetRolloutPodAnnotations(item interface{}) map[string]string {
    return item.(argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
func GetRolloutPodAnnotations(item runtime.Object) map[string]string {
    return item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
}

// GetDeploymentContainers returns the containers of given deployment
func GetDeploymentContainers(item interface{}) []v1.Container {
    return item.(appsv1.Deployment).Spec.Template.Spec.Containers
func GetDeploymentContainers(item runtime.Object) []v1.Container {
    return item.(*appsv1.Deployment).Spec.Template.Spec.Containers
}

// GetCronJobContainers returns the containers of given cronjob
func GetCronJobContainers(item runtime.Object) []v1.Container {
    return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Containers
}

// GetDaemonSetContainers returns the containers of given daemonSet
func GetDaemonSetContainers(item interface{}) []v1.Container {
    return item.(appsv1.DaemonSet).Spec.Template.Spec.Containers
func GetDaemonSetContainers(item runtime.Object) []v1.Container {
    return item.(*appsv1.DaemonSet).Spec.Template.Spec.Containers
}

// GetStatefulSetContainers returns the containers of given statefulSet
func GetStatefulSetContainers(item interface{}) []v1.Container {
    return item.(appsv1.StatefulSet).Spec.Template.Spec.Containers
func GetStatefulSetContainers(item runtime.Object) []v1.Container {
    return item.(*appsv1.StatefulSet).Spec.Template.Spec.Containers
}

// GetDeploymentConfigContainers returns the containers of given deploymentConfig
func GetDeploymentConfigContainers(item interface{}) []v1.Container {
    return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
func GetDeploymentConfigContainers(item runtime.Object) []v1.Container {
    return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Containers
}

// GetRolloutContainers returns the containers of given rollout
func GetRolloutContainers(item interface{}) []v1.Container {
    return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers
func GetRolloutContainers(item runtime.Object) []v1.Container {
    return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Containers
}

// GetDeploymentInitContainers returns the containers of given deployment
func GetDeploymentInitContainers(item interface{}) []v1.Container {
    return item.(appsv1.Deployment).Spec.Template.Spec.InitContainers
func GetDeploymentInitContainers(item runtime.Object) []v1.Container {
    return item.(*appsv1.Deployment).Spec.Template.Spec.InitContainers
}

// GetCronJobInitContainers returns the containers of given cronjob
func GetCronJobInitContainers(item runtime.Object) []v1.Container {
    return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.InitContainers
}

// GetDaemonSetInitContainers returns the containers of given daemonSet
func GetDaemonSetInitContainers(item interface{}) []v1.Container {
    return item.(appsv1.DaemonSet).Spec.Template.Spec.InitContainers
func GetDaemonSetInitContainers(item runtime.Object) []v1.Container {
    return item.(*appsv1.DaemonSet).Spec.Template.Spec.InitContainers
}

// GetStatefulSetInitContainers returns the containers of given statefulSet
func GetStatefulSetInitContainers(item interface{}) []v1.Container {
    return item.(appsv1.StatefulSet).Spec.Template.Spec.InitContainers
func GetStatefulSetInitContainers(item runtime.Object) []v1.Container {
    return item.(*appsv1.StatefulSet).Spec.Template.Spec.InitContainers
}

// GetDeploymentConfigInitContainers returns the containers of given deploymentConfig
func GetDeploymentConfigInitContainers(item interface{}) []v1.Container {
    return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
func GetDeploymentConfigInitContainers(item runtime.Object) []v1.Container {
    return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.InitContainers
}

// GetRolloutInitContainers returns the containers of given rollout
func GetRolloutInitContainers(item interface{}) []v1.Container {
    return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers
func GetRolloutInitContainers(item runtime.Object) []v1.Container {
    return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.InitContainers
}

// UpdateDeployment performs rolling upgrade on deployment
func UpdateDeployment(clients kube.Clients, namespace string, resource interface{}) error {
    deployment := resource.(appsv1.Deployment)
    _, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), &deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"})
func UpdateDeployment(clients kube.Clients, namespace string, resource runtime.Object) error {
    deployment := resource.(*appsv1.Deployment)
    _, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, meta_v1.UpdateOptions{FieldManager: "Reloader"})
    return err
}

// CreateJobFromCronjob performs rolling upgrade on cronjob
func CreateJobFromCronjob(clients kube.Clients, namespace string, resource runtime.Object) error {
    cronJob := resource.(*batchv1.CronJob)
    job := &batchv1.Job{
        ObjectMeta: cronJob.Spec.JobTemplate.ObjectMeta,
        Spec:       cronJob.Spec.JobTemplate.Spec,
    }
    job.GenerateName = cronJob.Name + "-"
    _, err := clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, meta_v1.CreateOptions{FieldManager: "Reloader"})
    return err
}

// UpdateDaemonSet performs rolling upgrade on daemonSet
func UpdateDaemonSet(clients kube.Clients, namespace string, resource interface{}) error {
    daemonSet := resource.(appsv1.DaemonSet)
    _, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), &daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
func UpdateDaemonSet(clients kube.Clients, namespace string, resource runtime.Object) error {
    daemonSet := resource.(*appsv1.DaemonSet)
    _, err := clients.KubernetesClient.AppsV1().DaemonSets(namespace).Update(context.TODO(), daemonSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
    return err
}

// UpdateStatefulSet performs rolling upgrade on statefulSet
func UpdateStatefulSet(clients kube.Clients, namespace string, resource interface{}) error {
    statefulSet := resource.(appsv1.StatefulSet)
    _, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), &statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
func UpdateStatefulSet(clients kube.Clients, namespace string, resource runtime.Object) error {
    statefulSet := resource.(*appsv1.StatefulSet)
    _, err := clients.KubernetesClient.AppsV1().StatefulSets(namespace).Update(context.TODO(), statefulSet, meta_v1.UpdateOptions{FieldManager: "Reloader"})
    return err
}

// UpdateDeploymentConfig performs rolling upgrade on deploymentConfig
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource interface{}) error {
    deploymentConfig := resource.(openshiftv1.DeploymentConfig)
    _, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), &deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
func UpdateDeploymentConfig(clients kube.Clients, namespace string, resource runtime.Object) error {
    deploymentConfig := resource.(*openshiftv1.DeploymentConfig)
    _, err := clients.OpenshiftAppsClient.AppsV1().DeploymentConfigs(namespace).Update(context.TODO(), deploymentConfig, meta_v1.UpdateOptions{FieldManager: "Reloader"})
    return err
}

// UpdateRollout performs rolling upgrade on rollout
func UpdateRollout(clients kube.Clients, namespace string, resource interface{}) error {
    rollout := resource.(argorolloutv1alpha1.Rollout)
func UpdateRollout(clients kube.Clients, namespace string, resource runtime.Object) error {
    rollout := resource.(*argorolloutv1alpha1.Rollout)
    rolloutBefore, _ := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Get(context.TODO(), rollout.Name, meta_v1.GetOptions{})
    logrus.Warnf("Before: %+v", rolloutBefore.Spec.Template.Spec.Containers[0].Env)
    logrus.Warnf("After: %+v", rollout.Spec.Template.Spec.Containers[0].Env)
    _, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Update(context.TODO(), &rollout, meta_v1.UpdateOptions{FieldManager: "Reloader"})
    _, err := clients.ArgoRolloutClient.ArgoprojV1alpha1().Rollouts(namespace).Patch(context.TODO(), rollout.Name, patchtypes.MergePatchType, []byte(fmt.Sprintf(`{"spec": {"restartAt": "%s"}}`, time.Now().Format(time.RFC3339))), meta_v1.PatchOptions{FieldManager: "Reloader"})
    return err
}
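The new `UpdateRollout` no longer updates the Rollout object wholesale; it merge-patches `spec.restartAt`, the field Argo Rollouts watches to trigger a restart. The equivalent manual operation, sketched with a hypothetical rollout name:

```bash
# Mirror Reloader's merge patch by hand (RFC 3339 UTC timestamp)
kubectl patch rollout my-rollout -n test-reloader --type merge \
  -p "{\"spec\": {\"restartAt\": \"$(date -u +%Y-%m-%dT%H:%M:%SZ)\"}}"
```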

// GetDeploymentVolumes returns the Volumes of given deployment
func GetDeploymentVolumes(item interface{}) []v1.Volume {
    return item.(appsv1.Deployment).Spec.Template.Spec.Volumes
func GetDeploymentVolumes(item runtime.Object) []v1.Volume {
    return item.(*appsv1.Deployment).Spec.Template.Spec.Volumes
}

// GetCronJobVolumes returns the Volumes of given cronjob
func GetCronJobVolumes(item runtime.Object) []v1.Volume {
    return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Spec.Volumes
}

// GetDaemonSetVolumes returns the Volumes of given daemonSet
func GetDaemonSetVolumes(item interface{}) []v1.Volume {
    return item.(appsv1.DaemonSet).Spec.Template.Spec.Volumes
func GetDaemonSetVolumes(item runtime.Object) []v1.Volume {
    return item.(*appsv1.DaemonSet).Spec.Template.Spec.Volumes
}

// GetStatefulSetVolumes returns the Volumes of given statefulSet
func GetStatefulSetVolumes(item interface{}) []v1.Volume {
    return item.(appsv1.StatefulSet).Spec.Template.Spec.Volumes
func GetStatefulSetVolumes(item runtime.Object) []v1.Volume {
    return item.(*appsv1.StatefulSet).Spec.Template.Spec.Volumes
}

// GetDeploymentConfigVolumes returns the Volumes of given deploymentConfig
func GetDeploymentConfigVolumes(item interface{}) []v1.Volume {
    return item.(openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
func GetDeploymentConfigVolumes(item runtime.Object) []v1.Volume {
    return item.(*openshiftv1.DeploymentConfig).Spec.Template.Spec.Volumes
}

// GetRolloutVolumes returns the Volumes of given rollout
func GetRolloutVolumes(item interface{}) []v1.Volume {
    return item.(argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes
func GetRolloutVolumes(item runtime.Object) []v1.Volume {
    return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Spec.Volumes
}
@@ -1,20 +1,26 @@
package cmd

import (
    "context"
    "errors"
    "fmt"
    "github.com/stakater/Reloader/internal/pkg/constants"
    "net/http"
    "os"
    "strings"

    "github.com/stakater/Reloader/internal/pkg/constants"
    "github.com/stakater/Reloader/internal/pkg/leadership"

    "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"

    "github.com/stakater/Reloader/internal/pkg/controller"
    "github.com/stakater/Reloader/internal/pkg/metrics"
    "github.com/stakater/Reloader/internal/pkg/options"
    "github.com/stakater/Reloader/internal/pkg/util"
    "github.com/stakater/Reloader/pkg/kube"
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// NewReloaderCommand starts the reloader controller
@@ -27,35 +33,57 @@ func NewReloaderCommand() *cobra.Command {
    }

    // options
    cmd.PersistentFlags().BoolVar(&options.AutoReloadAll, "auto-reload-all", false, "Auto reload all resources")
    cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name")
    cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name")
    cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets")
    cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets/configmaps")
    cmd.PersistentFlags().StringVar(&options.ConfigmapReloaderAutoAnnotation, "configmap-auto-annotation", "configmap.reloader.stakater.com/auto", "annotation to detect changes in configmaps")
    cmd.PersistentFlags().StringVar(&options.SecretReloaderAutoAnnotation, "secret-auto-annotation", "secret.reloader.stakater.com/auto", "annotation to detect changes in secrets")
    cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation")
    cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmapts to match the search")
    cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON")
    cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search")
    cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)")
    cmd.PersistentFlags().StringVar(&options.LogLevel, "log-level", "info", "Log level to use (trace, debug, info, warning, error, fatal and panic)")
    cmd.PersistentFlags().StringVar(&options.WebhookUrl, "webhook-url", "", "webhook to trigger instead of performing a reload")
    cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
    cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
    cmd.PersistentFlags().StringSlice("namespace-selector", []string{}, "list of key:value labels to filter on for namespaces")
    cmd.PersistentFlags().StringSlice("resource-label-selector", []string{}, "list of key:value labels to filter on for configmaps and secrets")
    cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
    cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
    cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
    cmd.PersistentFlags().StringVar(&options.ReloadOnDelete, "reload-on-delete", "false", "Add support to watch delete events")
    cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")
    cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts")

    return cmd
}
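The flags registered above compose on the command line; an illustrative invocation (the values are examples, not defaults):

```bash
reloader \
  --log-format json \
  --log-level debug \
  --namespace-selector "team=platform" \
  --resource-label-selector "reload=enabled" \
  --reload-on-create true \
  --enable-ha
```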

func validateFlags(*cobra.Command, []string) error {
    // Ensure the reload strategy is one of the following...
    var validReloadStrategy bool
    valid := []string{constants.EnvVarsReloadStrategy, constants.AnnotationsReloadStrategy}
    for _, s := range valid {
        if s == options.ReloadStrategy {
            return nil
            validReloadStrategy = true
        }
    }

    err := fmt.Sprintf("%s must be one of: %s", constants.ReloadStrategyFlag, strings.Join(valid, ", "))
    return errors.New(err)
    if !validReloadStrategy {
        err := fmt.Sprintf("%s must be one of: %s", constants.ReloadStrategyFlag, strings.Join(valid, ", "))
        return errors.New(err)
    }

    // Validate that HA options are correct
    if options.EnableHA {
        if err := validateHAEnvs(); err != nil {
            return err
        }
    }

    return nil
}

func configureLogging(logFormat string) error {
func configureLogging(logFormat, logLevel string) error {
    switch logFormat {
    case "json":
        logrus.SetFormatter(&logrus.JSONFormatter{})
@@ -65,11 +93,36 @@ func configureLogging(logFormat string) error {
            return fmt.Errorf("unsupported logging formatter: %q", logFormat)
        }
    }
    // set log level
    level, err := logrus.ParseLevel(logLevel)
    if err != nil {
        return err
    }
    logrus.SetLevel(level)
    return nil
}

func validateHAEnvs() error {
    podName, podNamespace := getHAEnvs()

    if podName == "" {
        return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNameEnv, constants.PodNameEnv)
    }
    if podNamespace == "" {
        return fmt.Errorf("%s not set, cannot run in HA mode without %s set", constants.PodNamespaceEnv, constants.PodNamespaceEnv)
    }
    return nil
}

func getHAEnvs() (string, string) {
    podName := os.Getenv(constants.PodNameEnv)
    podNamespace := os.Getenv(constants.PodNamespaceEnv)

    return podName, podNamespace
}
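`validateHAEnvs` expects `POD_NAME` and `POD_NAMESPACE` to be present whenever `--enable-ha` is set. In a cluster these would normally be injected via the Kubernetes downward API; a local sketch for testing:

```bash
# Normally injected via the downward API
# (fieldRef: metadata.name / metadata.namespace)
export POD_NAME="reloader-0"
export POD_NAMESPACE="default"
reloader --enable-ha
```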

func startReloader(cmd *cobra.Command, args []string) {
    err := configureLogging(options.LogFormat)
    err := configureLogging(options.LogFormat, options.LogLevel)
    if err != nil {
        logrus.Warn(err)
    }
@@ -97,18 +150,47 @@ func startReloader(cmd *cobra.Command, args []string) {
        logrus.Fatal(err)
    }

    namespaceLabelSelector, err := getNamespaceLabelSelector(cmd)
    if err != nil {
        logrus.Fatal(err)
    }

    resourceLabelSelector, err := getResourceLabelSelector(cmd)
    if err != nil {
        logrus.Fatal(err)
    }

    if len(namespaceLabelSelector) > 0 {
        logrus.Warnf("namespace-selector is set, will only detect changes in namespaces with these labels: %s.", namespaceLabelSelector)
    }

    if len(resourceLabelSelector) > 0 {
        logrus.Warnf("resource-label-selector is set, will only detect changes on resources with these labels: %s.", resourceLabelSelector)
    }

    if options.WebhookUrl != "" {
        logrus.Warnf("webhook-url is set, will only send webhook, no resources will be reloaded")
    }

    collectors := metrics.SetupPrometheusEndpoint()

    var controllers []*controller.Controller
    for k := range kube.ResourceMap {
        if ignoredResourcesList.Contains(k) {
        if ignoredResourcesList.Contains(k) || (len(namespaceLabelSelector) == 0 && k == "namespaces") {
            continue
        }

        c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, collectors)
        c, err := controller.NewController(clientset, k, currentNamespace, ignoredNamespacesList, namespaceLabelSelector, resourceLabelSelector, collectors)
        if err != nil {
            logrus.Fatalf("%s", err)
        }

        controllers = append(controllers, c)

        // If HA is enabled we only run the controller when elected leader
        if options.EnableHA {
            continue
        }
        // Now let's start the controller
        stop := make(chan struct{})
        defer close(stop)
@@ -116,14 +198,91 @@ func startReloader(cmd *cobra.Command, args []string) {
        go c.Run(1, stop)
    }

    // Wait forever
    select {}
    // Run leadership election
    if options.EnableHA {
        podName, podNamespace := getHAEnvs()
        lock := leadership.GetNewLock(clientset.CoordinationV1(), constants.LockName, podName, podNamespace)
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()
        go leadership.RunLeaderElection(lock, ctx, cancel, podName, controllers)
    }

    leadership.SetupLivenessEndpoint()
    logrus.Fatal(http.ListenAndServe(constants.DefaultHttpListenAddr, nil))
}

func getIgnoredNamespacesList(cmd *cobra.Command) (util.List, error) {
    return getStringSliceFromFlags(cmd, "namespaces-to-ignore")
}

func getNamespaceLabelSelector(cmd *cobra.Command) (string, error) {
    slice, err := getStringSliceFromFlags(cmd, "namespace-selector")
    if err != nil {
        logrus.Fatal(err)
    }

    for i, kv := range slice {
        // Legacy support for ":" as a delimiter and "*" for wildcard.
        if strings.Contains(kv, ":") {
            split := strings.Split(kv, ":")
            if split[1] == "*" {
                slice[i] = split[0]
            } else {
                slice[i] = split[0] + "=" + split[1]
            }
        }
        // Convert wildcard to valid apimachinery operator
        if strings.Contains(kv, "=") {
            split := strings.Split(kv, "=")
            if split[1] == "*" {
                slice[i] = split[0]
            }
        }
    }

    namespaceLabelSelector := strings.Join(slice[:], ",")
    _, err = labels.Parse(namespaceLabelSelector)
    if err != nil {
        logrus.Fatal(err)
    }

    return namespaceLabelSelector, nil
}
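The loop above accepts the legacy `key:value` form (with `*` as a wildcard) alongside standard `key=value` selectors, so the following invocations are equivalent ways to select on a label key or value:

```bash
# Legacy delimiter with wildcard: any namespace carrying the "team" label
reloader --namespace-selector "team:*"

# Same selection in plain apimachinery syntax (key existence)
reloader --namespace-selector "team"

# Exact matches, comma-joined as in strings.Join above
reloader --namespace-selector "team=platform,env=prod"
```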

func getResourceLabelSelector(cmd *cobra.Command) (string, error) {
    slice, err := getStringSliceFromFlags(cmd, "resource-label-selector")
    if err != nil {
        logrus.Fatal(err)
    }

    for i, kv := range slice {
        // Legacy support for ":" as a delimiter and "*" for wildcard.
        if strings.Contains(kv, ":") {
            split := strings.Split(kv, ":")
            if split[1] == "*" {
                slice[i] = split[0]
            } else {
                slice[i] = split[0] + "=" + split[1]
            }
        }
        // Convert wildcard to valid apimachinery operator
        if strings.Contains(kv, "=") {
            split := strings.Split(kv, "=")
            if split[1] == "*" {
                slice[i] = split[0]
            }
        }
    }

    resourceLabelSelector := strings.Join(slice[:], ",")
    _, err = labels.Parse(resourceLabelSelector)
    if err != nil {
        logrus.Fatal(err)
    }

    return resourceLabelSelector, nil
}

func getStringSliceFromFlags(cmd *cobra.Command, flag string) ([]string, error) {
    slice, err := cmd.Flags().GetStringSlice(flag)
    if err != nil {
@@ -1,6 +1,9 @@
package constants

const (
	// DefaultHttpListenAddr is the default listening address for the global HTTP server
	DefaultHttpListenAddr = ":9090"

	// ConfigmapEnvVarPostfix is a postfix for configmap envVar
	ConfigmapEnvVarPostfix = "CONFIGMAP"
	// SecretEnvVarPostfix is a postfix for secret envVar
@@ -20,3 +23,10 @@ const (
	// AnnotationsReloadStrategy instructs Reloader to add pod template annotations to facilitate a restart
	AnnotationsReloadStrategy = "annotations"
)

// Leadership election related consts
const (
	LockName        string = "stakater-reloader-lock"
	PodNameEnv      string = "POD_NAME"
	PodNamespaceEnv string = "POD_NAMESPACE"
)
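The lock name and the two env keys above are consumed by main's HA path. getHAEnvs is referenced in the first hunk but not shown in this diff; a plausible reconstruction, assuming the Downward API injects both variables into the pod:

import (
	"os"

	"github.com/stakater/Reloader/internal/pkg/constants"
)

// Hypothetical sketch of getHAEnvs: read the pod identity for the lease lock.
func getHAEnvs() (string, string) {
	podName := os.Getenv(constants.PodNameEnv)
	podNamespace := os.Getenv(constants.PodNamespaceEnv)
	return podName, podNamespace
}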
@@ -2,22 +2,26 @@ package controller

import (
	"fmt"
	"github.com/stakater/Reloader/internal/pkg/options"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/handler"
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubectl/pkg/scheme"
	"k8s.io/utils/strings/slices"
)

// Controller for checking events
@@ -27,25 +31,55 @@ type Controller struct {
	queue             workqueue.RateLimitingInterface
	informer          cache.Controller
	namespace         string
	resource          string
	ignoredNamespaces util.List
	collectors        metrics.Collectors
	recorder          record.EventRecorder
	namespaceSelector string
	resourceSelector  string
}

// controllerInitialized flag determines whether the controller is being initialized
var controllerInitialized bool = false
var secretControllerInitialized bool = false
var configmapControllerInitialized bool = false
var selectedNamespacesCache []string

// NewController for initializing a Controller
func NewController(
	client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, collectors metrics.Collectors) (*Controller, error) {
	client kubernetes.Interface, resource string, namespace string, ignoredNamespaces []string, namespaceLabelSelector string, resourceLabelSelector string, collectors metrics.Collectors) (*Controller, error) {

	if options.SyncAfterRestart {
		secretControllerInitialized = true
		configmapControllerInitialized = true
	}

	c := Controller{
		client:            client,
		namespace:         namespace,
		ignoredNamespaces: ignoredNamespaces,
		namespaceSelector: namespaceLabelSelector,
		resourceSelector:  resourceLabelSelector,
		resource:          resource,
	}
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: client.CoreV1().Events(""),
	})
	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("reloader-%s", resource)})

	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	listWatcher := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), resource, namespace, fields.Everything())

	optionsModifier := func(options *metav1.ListOptions) {
		if resource == "namespaces" {
			options.LabelSelector = c.namespaceSelector
		} else if len(c.resourceSelector) > 0 {
			options.LabelSelector = c.resourceSelector
		} else {
			options.FieldSelector = fields.Everything().String()
		}
	}

	listWatcher := cache.NewFilteredListWatchFromClient(client.CoreV1().RESTClient(), resource, namespace, optionsModifier)

	indexer, informer := cache.NewIndexerInformer(listWatcher, kube.ResourceMap[resource], 0, cache.ResourceEventHandlerFuncs{
		AddFunc: c.Add,
@@ -56,16 +90,27 @@ func NewController(
	c.informer = informer
	c.queue = queue
	c.collectors = collectors
	c.recorder = recorder

	logrus.Infof("created controller for: %s", resource)
	return &c, nil
}

// Add function to add a new object to the queue in case of creating a resource
func (c *Controller) Add(obj interface{}) {

	switch object := obj.(type) {
	case *v1.Namespace:
		c.addSelectedNamespaceToCache(*object)
		return
	}

	if options.ReloadOnCreate == "true" {
		if !c.resourceInIgnoredNamespace(obj) && controllerInitialized {
		if !c.resourceInIgnoredNamespace(obj) && c.resourceInSelectedNamespaces(obj) && secretControllerInitialized && configmapControllerInitialized {
			c.queue.Add(handler.ResourceCreatedHandler{
				Resource:   obj,
				Collectors: c.collectors,
				Recorder:   c.recorder,
			})
		}
	}
@@ -81,23 +126,77 @@ func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
	return false
}

func (c *Controller) resourceInSelectedNamespaces(raw interface{}) bool {
	if len(c.namespaceSelector) == 0 {
		return true
	}

	switch object := raw.(type) {
	case *v1.ConfigMap:
		if slices.Contains(selectedNamespacesCache, object.GetNamespace()) {
			return true
		}
	case *v1.Secret:
		if slices.Contains(selectedNamespacesCache, object.GetNamespace()) {
			return true
		}
	}
	return false
}

func (c *Controller) addSelectedNamespaceToCache(namespace v1.Namespace) {
	selectedNamespacesCache = append(selectedNamespacesCache, namespace.GetName())
	logrus.Infof("added namespace to be watched: %s", namespace.GetName())
}

func (c *Controller) removeSelectedNamespaceFromCache(namespace v1.Namespace) {
	for i, v := range selectedNamespacesCache {
		if v == namespace.GetName() {
			selectedNamespacesCache = append(selectedNamespacesCache[:i], selectedNamespacesCache[i+1:]...)
			logrus.Infof("removed namespace from watch: %s", namespace.GetName())
			return
		}
	}
}

// Update function to add an old object and a new object to the queue in case of updating a resource
func (c *Controller) Update(old interface{}, new interface{}) {
	if !c.resourceInIgnoredNamespace(new) {
	switch new.(type) {
	case *v1.Namespace:
		return
	}

	if !c.resourceInIgnoredNamespace(new) && c.resourceInSelectedNamespaces(new) {
		c.queue.Add(handler.ResourceUpdatedHandler{
			Resource:    new,
			OldResource: old,
			Collectors:  c.collectors,
			Recorder:    c.recorder,
		})
	}
}

// Delete function to add an object to the queue in case of deleting a resource
func (c *Controller) Delete(old interface{}) {
	// Todo: Any future delete event can be handled here

	if options.ReloadOnDelete == "true" {
		if !c.resourceInIgnoredNamespace(old) && c.resourceInSelectedNamespaces(old) && secretControllerInitialized && configmapControllerInitialized {
			c.queue.Add(handler.ResourceDeleteHandler{
				Resource:   old,
				Collectors: c.collectors,
				Recorder:   c.recorder,
			})
		}
	}

	switch object := old.(type) {
	case *v1.Namespace:
		c.removeSelectedNamespaceFromCache(*object)
		return
	}
}

//Run function for controller which handles the queue
// Run function for controller which handles the queue
func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
	defer runtime.HandleCrash()

@@ -122,7 +221,11 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) {

func (c *Controller) runWorker() {
	// At this point the controller is fully initialized and we can start processing the resources
	controllerInitialized = true
	if c.resource == "secrets" {
		secretControllerInitialized = true
	} else if c.resource == "configMaps" {
		configmapControllerInitialized = true
	}

	for c.processNextItem() {
	}
@@ -169,5 +272,6 @@ func (c *Controller) handleErr(err error, key interface{}) {
	c.queue.Forget(key)
	// Report to an external entity that, even after several retries, we could not successfully process this key
	runtime.HandleError(err)
	logrus.Infof("Dropping the key %q out of the queue: %v", key, err)
	logrus.Errorf("Dropping key out of the queue: %v", err)
	logrus.Debugf("Dropping the key %q out of the queue: %v", key, err)
}
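NewController now takes the two selector strings between the ignored-namespace list and the collectors. A wiring sketch under stated assumptions (clientset already built; the ignored list and helper name are hypothetical), mirroring the test usage further down:

// buildControllers is illustrative, not part of the diff.
func buildControllers(clientset kubernetes.Interface, nsSelector, resSelector string) ([]*controller.Controller, error) {
	var controllers []*controller.Controller
	for k := range kube.ResourceMap { // "configMaps", "secrets", "namespaces"
		c, err := controller.NewController(
			clientset, k, metav1.NamespaceAll,
			[]string{"kube-system"}, // ignored namespaces (hypothetical)
			nsSelector, resSelector,
			metrics.NewCollectors(),
		)
		if err != nil {
			return nil, err
		}
		controllers = append(controllers, c)
	}
	return controllers, nil
}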
@@ -1,11 +1,13 @@
package controller

import (
	"github.com/stakater/Reloader/internal/pkg/constants"
	"context"
	"os"
	"testing"
	"time"

	"github.com/stakater/Reloader/internal/pkg/constants"

	"github.com/stakater/Reloader/internal/pkg/metrics"

	"github.com/sirupsen/logrus"
@@ -14,7 +16,10 @@ import (
	"github.com/stakater/Reloader/internal/pkg/testutil"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)
@@ -40,7 +45,10 @@ func TestMain(m *testing.M) {

	logrus.Infof("Creating controller")
	for k := range kube.ResourceMap {
		c, err := NewController(clients.KubernetesClient, k, namespace, []string{}, collectors)
		if k == "namespaces" {
			continue
		}
		c, err := NewController(clients.KubernetesClient, k, namespace, []string{}, "", "", collectors)
		if err != nil {
			logrus.Fatalf("%s", err)
		}
@@ -2279,3 +2287,194 @@ func TestController_resourceInIgnoredNamespace(t *testing.T) {
		})
	}
}

func TestController_resourceInNamespaceSelector(t *testing.T) {
	type fields struct {
		indexer           cache.Indexer
		queue             workqueue.RateLimitingInterface
		informer          cache.Controller
		namespace         v1.Namespace
		namespaceSelector string
	}
	type args struct {
		raw interface{}
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		want   bool
	}{
		{
			name: "TestConfigMapResourceInNamespaceSelector",
			fields: fields{
				namespaceSelector: "select=this,select2=this2",
				namespace: v1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name: "selected-namespace",
						Labels: map[string]string{
							"select":  "this",
							"select2": "this2",
						},
					},
				},
			},
			args: args{
				raw: testutil.GetConfigmap("selected-namespace", "testcm", "test"),
			},
			want: true,
		}, {
			name: "TestConfigMapResourceNotInNamespaceSelector",
			fields: fields{
				namespaceSelector: "select=this,select2=this2",
				namespace: v1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name:   "not-selected-namespace",
						Labels: map[string]string{},
					},
				},
			},
			args: args{
				raw: testutil.GetConfigmap("not-selected-namespace", "testcm", "test"),
			},
			want: false,
		},
		{
			name: "TestSecretResourceInNamespaceSelector",
			fields: fields{
				namespaceSelector: "select=this,select2=this2",
				namespace: v1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name: "selected-namespace",
						Labels: map[string]string{
							"select":  "this",
							"select2": "this2",
						},
					},
				},
			},
			args: args{
				raw: testutil.GetSecret("selected-namespace", "testsecret", "test"),
			},
			want: true,
		}, {
			name: "TestSecretResourceNotInNamespaceSelector",
			fields: fields{
				namespaceSelector: "select=this,select2=this2",
				namespace: v1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name:   "not-selected-namespace",
						Labels: map[string]string{},
					},
				},
			},
			args: args{
				raw: testutil.GetSecret("not-selected-namespace", "secret", "test"),
			},
			want: false,
		}, {
			name: "TestSecretResourceInNamespaceSelectorKeyExists",
			fields: fields{
				namespaceSelector: "select",
				namespace: v1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name: "selected-namespace",
						Labels: map[string]string{
							"select": "this",
						},
					},
				},
			},
			args: args{
				raw: testutil.GetSecret("selected-namespace", "secret", "test"),
			},
			want: true,
		}, {
			name: "TestSecretResourceInNamespaceSelectorValueIn",
			fields: fields{
				namespaceSelector: "select in (select1, select2, select3)",
				namespace: v1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name: "selected-namespace",
						Labels: map[string]string{
							"select": "select2",
						},
					},
				},
			},
			args: args{
				raw: testutil.GetSecret("selected-namespace", "secret", "test"),
			},
			want: true,
		}, {
			name: "TestSecretResourceInNamespaceSelectorKeyDoesNotExist",
			fields: fields{
				namespaceSelector: "!select2",
				namespace: v1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name: "selected-namespace",
						Labels: map[string]string{
							"select": "this",
						},
					},
				},
			},
			args: args{
				raw: testutil.GetSecret("selected-namespace", "secret", "test"),
			},
			want: true,
		}, {
			name: "TestSecretResourceInNamespaceSelectorMultipleConditions",
			fields: fields{
				namespaceSelector: "select,select2=this2,select3!=this4",
				namespace: v1.Namespace{
					ObjectMeta: metav1.ObjectMeta{
						Name: "selected-namespace",
						Labels: map[string]string{
							"select":  "this",
							"select2": "this2",
							"select3": "this3",
						},
					},
				},
			},
			args: args{
				raw: testutil.GetSecret("selected-namespace", "secret", "test"),
			},
			want: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fakeClient := fake.NewSimpleClientset()
			namespace, _ := fakeClient.CoreV1().Namespaces().Create(context.Background(), &tt.fields.namespace, metav1.CreateOptions{})
			logrus.Infof("created fakeClient namespace for testing = %s", namespace.Name)

			c := &Controller{
				client:            fakeClient,
				indexer:           tt.fields.indexer,
				queue:             tt.fields.queue,
				informer:          tt.fields.informer,
				namespace:         tt.fields.namespace.ObjectMeta.Name,
				namespaceSelector: tt.fields.namespaceSelector,
			}

			listOptions := metav1.ListOptions{}
			listOptions.LabelSelector = tt.fields.namespaceSelector
			namespaces, _ := fakeClient.CoreV1().Namespaces().List(context.Background(), listOptions)

			for _, ns := range namespaces.Items {
				c.addSelectedNamespaceToCache(ns)
			}

			if got := c.resourceInSelectedNamespaces(tt.args.raw); got != tt.want {
				t.Errorf("Controller.resourceInNamespaceSelector() = %v, want %v", got, tt.want)
			}

			for _, ns := range namespaces.Items {
				c.removeSelectedNamespaceFromCache(ns)
			}
		})
	}
}
@@ -3,14 +3,17 @@ package handler
import (
	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

// ResourceCreatedHandler contains new objects
type ResourceCreatedHandler struct {
	Resource   interface{}
	Collectors metrics.Collectors
	Recorder   record.EventRecorder
}

// Handle processes the newly created resource
@@ -19,8 +22,12 @@ func (r ResourceCreatedHandler) Handle() error {
		logrus.Errorf("Resource creation handler received nil resource")
	} else {
		config, _ := r.GetConfig()
		// Send webhook
		if options.WebhookUrl != "" {
			return sendUpgradeWebhook(config, options.WebhookUrl)
		}
		// process resource based on its type
		return doRollingUpgrade(config, r.Collectors)
		return doRollingUpgrade(config, r.Collectors, r.Recorder, invokeReloadStrategy)
	}
	return nil
}
internal/pkg/handler/delete.go (new file, 92 lines)
@@ -0,0 +1,92 @@
package handler

import (
	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/callbacks"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/testutil"
	"github.com/stakater/Reloader/internal/pkg/util"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/record"
)

// ResourceDeleteHandler contains the deleted objects
type ResourceDeleteHandler struct {
	Resource   interface{}
	Collectors metrics.Collectors
	Recorder   record.EventRecorder
}

// Handle processes resources being deleted
func (r ResourceDeleteHandler) Handle() error {
	if r.Resource == nil {
		logrus.Errorf("Resource delete handler received nil resource")
	} else {
		config, _ := r.GetConfig()
		// Send webhook
		if options.WebhookUrl != "" {
			return sendUpgradeWebhook(config, options.WebhookUrl)
		}
		// process resource based on its type
		return doRollingUpgrade(config, r.Collectors, r.Recorder, invokeDeleteStrategy)
	}
	return nil
}

// GetConfig gets configurations containing SHA, annotations, namespace and resource name
func (r ResourceDeleteHandler) GetConfig() (util.Config, string) {
	var oldSHAData string
	var config util.Config
	if _, ok := r.Resource.(*v1.ConfigMap); ok {
		config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
	} else if _, ok := r.Resource.(*v1.Secret); ok {
		config = util.GetSecretConfig(r.Resource.(*v1.Secret))
	} else {
		logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
	}
	return config, oldSHAData
}

func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
	if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
		return removePodAnnotations(upgradeFuncs, item, config, autoReload)
	}

	return removeContainerEnvVars(upgradeFuncs, item, config, autoReload)
}

func removePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
	config.SHAValue = testutil.GetSHAfromEmptyData()
	return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
}

func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
	envVar := getEnvVarName(config.ResourceName, config.Type)
	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)

	if container == nil {
		return constants.NoContainerFound
	}

	// remove the env var if it exists
	containers := upgradeFuncs.ContainersFunc(item)
	for i := range containers {
		envs := containers[i].Env
		index := -1
		for j := range envs {
			if envs[j].Name == envVar {
				index = j
				break
			}
		}
		if index != -1 {
			containers[i].Env = append(containers[i].Env[:index], containers[i].Env[index+1:]...)
			return constants.Updated
		}
	}

	return constants.NotUpdated
}
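removeContainerEnvVars undoes what the env-vars reload strategy adds: the tracking variable named by getEnvVarName (defined in the upgrade.go hunk below). Illustratively, assuming constants.EnvVarPrefix is "STAKATER_" (an assumption; the prefix constant is not shown in this diff):

// For a ConfigMap named "app-config":
envVar := getEnvVarName("app-config", constants.ConfigmapEnvVarPostfix)
fmt.Println(envVar) // STAKATER_APP_CONFIG_CONFIGMAP (assumed prefix)
// Deleting that env var changes the pod template, which triggers a rollout.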
@@ -3,8 +3,10 @@ package handler
import (
	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

// ResourceUpdatedHandler contains updated objects
@@ -12,6 +14,7 @@ type ResourceUpdatedHandler struct {
	Resource    interface{}
	OldResource interface{}
	Collectors  metrics.Collectors
	Recorder    record.EventRecorder
}

// Handle processes the updated resource
@@ -21,8 +24,12 @@ func (r ResourceUpdatedHandler) Handle() error {
	} else {
		config, oldSHAData := r.GetConfig()
		if config.SHAValue != oldSHAData {
			// Send a webhook if an update occurred
			if options.WebhookUrl != "" {
				return sendUpgradeWebhook(config, options.WebhookUrl)
			}
			// process resource based on its type
			return doRollingUpgrade(config, r.Collectors)
			return doRollingUpgrade(config, r.Collectors, r.Recorder, invokeReloadStrategy)
		}
	}
	return nil
@@ -1,11 +1,20 @@
package handler

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"regexp"
	"strconv"
	"strings"

	"github.com/parnurzeal/gorequest"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/sirupsen/logrus"
	alert "github.com/stakater/Reloader/internal/pkg/alerts"
	"github.com/stakater/Reloader/internal/pkg/callbacks"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/metrics"
@@ -13,9 +22,9 @@ import (
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
	v1 "k8s.io/api/core/v1"
	"regexp"
	"strconv"
	"strings"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/record"
)

// GetDeploymentRollingUpgradeFuncs returns all callback funcs for a deployment
@@ -32,6 +41,20 @@ func GetDeploymentRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
	}
}

// GetCronJobCreateJobFuncs returns all callback funcs for a cronjob
func GetCronJobCreateJobFuncs() callbacks.RollingUpgradeFuncs {
	return callbacks.RollingUpgradeFuncs{
		ItemsFunc:          callbacks.GetCronJobItems,
		AnnotationsFunc:    callbacks.GetCronJobAnnotations,
		PodAnnotationsFunc: callbacks.GetCronJobPodAnnotations,
		ContainersFunc:     callbacks.GetCronJobContainers,
		InitContainersFunc: callbacks.GetCronJobInitContainers,
		UpdateFunc:         callbacks.CreateJobFromCronjob,
		VolumesFunc:        callbacks.GetCronJobVolumes,
		ResourceType:       "CronJob",
	}
}

// GetDaemonSetRollingUpgradeFuncs returns all callback funcs for a daemonset
func GetDaemonSetRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
	return callbacks.RollingUpgradeFuncs{
@@ -88,31 +111,66 @@ func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
	}
}

func doRollingUpgrade(config util.Config, collectors metrics.Collectors) error {
func sendUpgradeWebhook(config util.Config, webhookUrl string) error {
	logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s', Sending webhook to '%s'",
		config.ResourceName, config.Type, config.Namespace, webhookUrl)

	body, errs := sendWebhook(webhookUrl)
	if errs != nil {
		// return the first error
		return errs[0]
	} else {
		logrus.Info(body)
	}

	return nil
}

func sendWebhook(url string) (string, []error) {
	request := gorequest.New()
	resp, _, err := request.Post(url).Send(`{"webhook":"update successful"}`).End()
	if err != nil {
		// the reloader seems to retry automatically so no retry logic added
		return "", err
	}
	defer resp.Body.Close()
	var buffer bytes.Buffer
	_, bufferErr := io.Copy(&buffer, resp.Body)
	if bufferErr != nil {
		logrus.Error(bufferErr)
	}
	return buffer.String(), nil
}

func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorder record.EventRecorder, invoke invokeStrategy) error {
	clients := kube.GetClients()

	err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors)
	err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder, invoke)
	if err != nil {
		return err
	}
	err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors)
	err = rollingUpgrade(clients, config, GetCronJobCreateJobFuncs(), collectors, recorder, invoke)
	if err != nil {
		return err
	}
	err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors)
	err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder, invoke)
	if err != nil {
		return err
	}
	err = rollingUpgrade(clients, config, GetStatefulSetRollingUpgradeFuncs(), collectors, recorder, invoke)
	if err != nil {
		return err
	}

	if kube.IsOpenshift {
		err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors)
		err = rollingUpgrade(clients, config, GetDeploymentConfigRollingUpgradeFuncs(), collectors, recorder, invoke)
		if err != nil {
			return err
		}
	}

	if options.IsArgoRollouts == "true" {
		err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors)
		err = rollingUpgrade(clients, config, GetArgoRolloutRollingUpgradeFuncs(), collectors, recorder, invoke)
		if err != nil {
			return err
		}
@@ -121,17 +179,17 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors) error {
	return nil
}

func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {

	err := PerformRollingUpgrade(clients, config, upgradeFuncs, collectors)
	err := PerformAction(clients, config, upgradeFuncs, collectors, recorder, strategy)
	if err != nil {
		logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
	}
	return err
}

// PerformRollingUpgrade upgrades the deployment if there is any change in configmap or secret data
func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors) error {
// PerformAction invokes the deployment if there is any change in configmap or secret data
func PerformAction(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)

	for _, i := range items {
@@ -140,16 +198,40 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
		annotationValue, found := annotations[config.Annotation]
		searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation]
		reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation]
		if !found && !foundAuto && !foundSearchAnn {
		typedAutoAnnotationEnabledValue, foundTypedAuto := annotations[config.TypedAutoAnnotation]
		excludeConfigmapAnnotationValue, foundExcludeConfigmap := annotations[options.ConfigmapExcludeReloaderAnnotation]
		excludeSecretAnnotationValue, foundExcludeSecret := annotations[options.SecretExcludeReloaderAnnotation]

		if !found && !foundAuto && !foundTypedAuto && !foundSearchAnn {
			annotations = upgradeFuncs.PodAnnotationsFunc(i)
			annotationValue = annotations[config.Annotation]
			searchAnnotationValue = annotations[options.AutoSearchAnnotation]
			reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation]
			typedAutoAnnotationEnabledValue = annotations[config.TypedAutoAnnotation]
		}

		isResourceExcluded := false

		switch config.Type {
		case constants.ConfigmapEnvVarPostfix:
			if foundExcludeConfigmap {
				isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeConfigmapAnnotationValue)
			}
		case constants.SecretEnvVarPostfix:
			if foundExcludeSecret {
				isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeSecretAnnotationValue)
			}
		}

		if isResourceExcluded {
			continue
		}

		result := constants.NotUpdated
		reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
		if err == nil && reloaderEnabled {
			result = invokeReloadStrategy(upgradeFuncs, i, config, true)
		reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue)
		typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue)
		if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll {
			result = strategy(upgradeFuncs, i, config, true)
		}

		if result != constants.Updated && annotationValue != "" {
@@ -158,7 +240,7 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
			value = strings.TrimSpace(value)
			re := regexp.MustCompile("^" + value + "$")
			if re.Match([]byte(config.ResourceName)) {
				result = invokeReloadStrategy(upgradeFuncs, i, config, false)
				result = strategy(upgradeFuncs, i, config, false)
				if result == constants.Updated {
					break
				}
@@ -169,27 +251,66 @@ func PerformRollingUpgrade(clients kube.Clients, config util.Config, upgradeFunc
		if result != constants.Updated && searchAnnotationValue == "true" {
			matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
			if matchAnnotationValue == "true" {
				result = invokeReloadStrategy(upgradeFuncs, i, config, true)
				result = strategy(upgradeFuncs, i, config, true)
			}
		}

		if result == constants.Updated {
			err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
			resourceName := util.ToObjectMeta(i).Name
			accessor, err := meta.Accessor(i)
			if err != nil {
				return err
			}
			resourceName := accessor.GetName()
			err = upgradeFuncs.UpdateFunc(clients, config.Namespace, i)
			if err != nil {
				message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
				logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)

				collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
				collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": config.Namespace}).Inc()
				if recorder != nil {
					recorder.Event(i, v1.EventTypeWarning, "ReloadFail", message)
				}
				return err
			} else {
				logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
				logrus.Infof("Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)
				message := fmt.Sprintf("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
				message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)

				logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'; updated '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)

				collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
				collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": config.Namespace}).Inc()
				alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD")
				if recorder != nil {
					recorder.Event(i, v1.EventTypeNormal, "Reloaded", message)
				}
				if ok && alert_on_reload == "true" {
					msg := fmt.Sprintf(
						"Reloader detected changes in *%s* of type *%s* in namespace *%s*. Hence reloaded *%s* of type *%s* in namespace *%s*",
						config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
					alert.SendWebhookAlert(msg)
				}
			}
		}
	}
	return nil
}

func checkIfResourceIsExcluded(resourceName, excludedResources string) bool {
	if excludedResources == "" {
		return false
	}

	excludedResourcesList := strings.Split(excludedResources, ",")
	for _, excludedResource := range excludedResourcesList {
		if strings.TrimSpace(excludedResource) == resourceName {
			return true
		}
	}

	return false
}

func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {
	for i := range volumes {
		if mountType == constants.ConfigmapEnvVarPostfix {
@@ -261,7 +382,7 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
	return nil
}

func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) *v1.Container {
func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) *v1.Container {
	volumes := upgradeFuncs.VolumesFunc(item)
	containers := upgradeFuncs.ContainersFunc(item)
	initContainers := upgradeFuncs.InitContainersFunc(item)
@@ -300,7 +421,9 @@ func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item
	return container
}

func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result

func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
	if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
		return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
	}
@@ -308,7 +431,7 @@ func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item inter
	return updateContainerEnvVars(upgradeFuncs, item, config, autoReload)
}

func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
	if container == nil {
		return constants.NoContainerFound
@@ -336,6 +459,13 @@ func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item inter
	return constants.Updated
}

func getReloaderAnnotationKey() string {
	return fmt.Sprintf("%s/%s",
		constants.ReloaderAnnotationPrefix,
		constants.LastReloadedFromAnnotation,
	)
}

func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, error) {
	if target == nil {
		return nil, errors.New("target is required")
@@ -346,10 +476,7 @@ func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, er
	// Intentionally only storing the last item in order to keep
	// the generated annotations as small as possible.
	annotations := make(map[string]string)
	lastReloadedResourceName := fmt.Sprintf("%s/%s",
		constants.ReloaderAnnotationPrefix,
		constants.LastReloadedFromAnnotation,
	)
	lastReloadedResourceName := getReloaderAnnotationKey()

	lastReloadedResource, err := json.Marshal(target)
	if err != nil {
@@ -360,9 +487,13 @@ func createReloadedAnnotations(target *util.ReloadSource) (map[string]string, er
	return annotations, nil
}

func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item interface{}, config util.Config, autoReload bool) constants.Result {
func getEnvVarName(resourceName string, typeName string) string {
	return constants.EnvVarPrefix + util.ConvertToEnvVarName(resourceName) + "_" + typeName
}

func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) constants.Result {
	var result constants.Result
	envVar := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + config.Type
	envVar := getEnvVarName(config.ResourceName, config.Type)
	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)

	if container == nil {
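A sketch of exercising sendWebhook against a throwaway local server (same-package test assumed, with "net/http", "net/http/httptest", and "testing" imported; the response body is hypothetical, only the POST contract comes from the code above):

func TestSendWebhookSketch(t *testing.T) {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			t.Errorf("expected POST, got %s", r.Method)
		}
		w.Write([]byte("ok"))
	}))
	defer srv.Close()

	body, errs := sendWebhook(srv.URL)
	if errs != nil {
		t.Fatalf("unexpected errors: %v", errs)
	}
	if body != "ok" {
		t.Fatalf("got %q, want %q", body, "ok")
	}
}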
File diff suppressed because it is too large.

internal/pkg/leadership/leadership.go (new file, 107 lines)
@@ -0,0 +1,107 @@
package leadership

import (
	"context"
	"net/http"
	"sync"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/controller"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"

	coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
)

var (
	// Used for liveness probe
	m       sync.Mutex
	healthy bool = true
)

func GetNewLock(client coordinationv1.CoordinationV1Interface, lockName, podname, namespace string) *resourcelock.LeaseLock {
	return &resourcelock.LeaseLock{
		LeaseMeta: v1.ObjectMeta{
			Name:      lockName,
			Namespace: namespace,
		},
		Client: client,
		LockConfig: resourcelock.ResourceLockConfig{
			Identity: podname,
		},
	}
}

// RunLeaderElection runs leadership election. If an instance of the controller is the leader and stops leading, it will shut down.
func RunLeaderElection(lock *resourcelock.LeaseLock, ctx context.Context, cancel context.CancelFunc, id string, controllers []*controller.Controller) {
	// Construct channels for the controllers to use
	var stopChannels []chan struct{}
	for i := 0; i < len(controllers); i++ {
		stop := make(chan struct{})
		stopChannels = append(stopChannels, stop)
	}

	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		ReleaseOnCancel: true,
		LeaseDuration:   15 * time.Second,
		RenewDeadline:   10 * time.Second,
		RetryPeriod:     2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(c context.Context) {
				logrus.Info("became leader, starting controllers")
				runControllers(controllers, stopChannels)
			},
			OnStoppedLeading: func() {
				logrus.Info("no longer leader, shutting down")
				stopControllers(stopChannels)
				cancel()
				m.Lock()
				defer m.Unlock()
				healthy = false
			},
			OnNewLeader: func(current_id string) {
				if current_id == id {
					logrus.Info("still the leader!")
					return
				}
				logrus.Infof("new leader is %s", current_id)
			},
		},
	})
}

func runControllers(controllers []*controller.Controller, stopChannels []chan struct{}) {
	for i, c := range controllers {
		c := c
		go c.Run(1, stopChannels[i])
	}
}

func stopControllers(stopChannels []chan struct{}) {
	for _, c := range stopChannels {
		close(c)
	}
}

// SetupLivenessEndpoint sets up the liveness probe endpoint. If leadership election is
// enabled and a replica stops leading, the liveness probe will fail and the
// kubelet will restart the container.
func SetupLivenessEndpoint() {
	http.HandleFunc("/live", healthz)
}

func healthz(w http.ResponseWriter, req *http.Request) {
	m.Lock()
	defer m.Unlock()
	if healthy {
		if i, err := w.Write([]byte("alive")); err != nil {
			logrus.Infof("failed to write liveness response, wrote: %d bytes, got err: %s", i, err)
		}
		return
	}

	w.WriteHeader(http.StatusInternalServerError)
}
internal/pkg/leadership/leadership_test.go (new file, 213 lines)
@@ -0,0 +1,213 @@
package leadership

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
	"os"
	"testing"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/controller"
	"github.com/stakater/Reloader/internal/pkg/handler"
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/testutil"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/kube"
)

func TestMain(m *testing.M) {

	testutil.CreateNamespace(testutil.Namespace, testutil.Clients.KubernetesClient)

	logrus.Infof("Running Testcases")
	retCode := m.Run()

	testutil.DeleteNamespace(testutil.Namespace, testutil.Clients.KubernetesClient)

	os.Exit(retCode)
}

func TestHealthz(t *testing.T) {
	request, err := http.NewRequest(http.MethodGet, "/live", nil)
	if err != nil {
		t.Fatalf("failed to create request")
	}

	response := httptest.NewRecorder()

	healthz(response, request)
	got := response.Code
	want := 200

	if got != want {
		t.Fatalf("got: %q, want: %q", got, want)
	}

	// Have the liveness probe serve a 500
	healthy = false

	request, err = http.NewRequest(http.MethodGet, "/live", nil)
	if err != nil {
		t.Fatalf("failed to create request")
	}

	response = httptest.NewRecorder()

	healthz(response, request)
	got = response.Code
	want = 500

	if got != want {
		t.Fatalf("got: %q, want: %q", got, want)
	}
}

// TestRunLeaderElection validates that the liveness endpoint serves 500 when
// leadership election fails
func TestRunLeaderElection(t *testing.T) {
	ctx, cancel := context.WithCancel(context.TODO())

	lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), constants.LockName, testutil.Pod, testutil.Namespace)

	go RunLeaderElection(lock, ctx, cancel, testutil.Pod, []*controller.Controller{})

	// healthy is still false from TestHealthz, so the probe serves 500 until leadership is gained
	request, err := http.NewRequest(http.MethodGet, "/live", nil)
	if err != nil {
		t.Fatalf("failed to create request")
	}

	response := httptest.NewRecorder()

	healthz(response, request)
	got := response.Code
	want := 500

	if got != want {
		t.Fatalf("got: %q, want: %q", got, want)
	}

	// Cancel the leader election context, so leadership is released and
	// live endpoint serves 500
	cancel()

	request, err = http.NewRequest(http.MethodGet, "/live", nil)
	if err != nil {
		t.Fatalf("failed to create request")
	}

	response = httptest.NewRecorder()

	healthz(response, request)
	got = response.Code
	want = 500

	if got != want {
		t.Fatalf("got: %q, want: %q", got, want)
	}
}

// TestRunLeaderElectionWithControllers tests that leadership election works
// with real controllers and that on context cancellation the controllers stop
// running.
func TestRunLeaderElectionWithControllers(t *testing.T) {
	t.Logf("Creating controller")
	var controllers []*controller.Controller
	for k := range kube.ResourceMap {
		c, err := controller.NewController(testutil.Clients.KubernetesClient, k, testutil.Namespace, []string{}, "", "", metrics.NewCollectors())
		if err != nil {
			logrus.Fatalf("%s", err)
		}

		controllers = append(controllers, c)
	}
	time.Sleep(3 * time.Second)

	lock := GetNewLock(testutil.Clients.KubernetesClient.CoordinationV1(), fmt.Sprintf("%s-%d", constants.LockName, 1), testutil.Pod, testutil.Namespace)

	ctx, cancel := context.WithCancel(context.TODO())

	// Start running leadership election, this also starts the controllers
	go RunLeaderElection(lock, ctx, cancel, testutil.Pod, controllers)
	time.Sleep(3 * time.Second)

	// Create a configmap and a deployment that references it
	configmapName := testutil.ConfigmapNamePrefix + "-update-" + testutil.RandSeq(5)
	configmapClient, err := testutil.CreateConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName, "www.google.com")
	if err != nil {
		t.Fatalf("Error while creating the configmap %v", err)
	}

	// Creating deployment
	_, err = testutil.CreateDeployment(testutil.Clients.KubernetesClient, configmapName, testutil.Namespace, true)
	if err != nil {
		t.Fatalf("Error in deployment creation: %v", err)
	}

	// Updating configmap for first time
	updateErr := testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com")
	if updateErr != nil {
		t.Fatalf("Configmap was not updated")
	}
	time.Sleep(3 * time.Second)

	// Verifying deployment update
	logrus.Infof("Verifying pod envvars has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com")
	config := util.Config{
		Namespace:    testutil.Namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
		Annotation:   options.ConfigmapUpdateOnChangeAnnotation,
	}
	deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
	updated := testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Fatalf("Deployment was not updated")
	}
	time.Sleep(testutil.SleepDuration)

	// Cancel the leader election context, so leadership is released
	logrus.Info("shutting down controller from test")
	cancel()
	time.Sleep(5 * time.Second)

	// Updating configmap again
	updateErr = testutil.UpdateConfigMap(configmapClient, testutil.Namespace, configmapName, "", "www.stakater.com/new")
	if updateErr != nil {
		t.Fatalf("Configmap was not updated")
	}

	// Verifying that the deployment was not updated as leadership has been lost
	logrus.Infof("Verifying pod envvars has not been updated")
	shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com/new")
	config = util.Config{
		Namespace:    testutil.Namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
		Annotation:   options.ConfigmapUpdateOnChangeAnnotation,
	}
	deploymentFuncs = handler.GetDeploymentRollingUpgradeFuncs()
	updated = testutil.VerifyResourceEnvVarUpdate(testutil.Clients, config, constants.ConfigmapEnvVarPostfix, deploymentFuncs)
	if updated {
		t.Fatalf("Deployment was updated")
	}

	// Deleting deployment
	err = testutil.DeleteDeployment(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName)
	if err != nil {
		logrus.Errorf("Error while deleting the deployment %v", err)
	}

	// Deleting configmap
	err = testutil.DeleteConfigMap(testutil.Clients.KubernetesClient, testutil.Namespace, configmapName)
	if err != nil {
		logrus.Errorf("Error while deleting the configmap %v", err)
	}
	time.Sleep(testutil.SleepDuration)
}
@@ -1,14 +1,16 @@
package metrics

import (
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/sirupsen/logrus"
	"net/http"
)

type Collectors struct {
	Reloaded *prometheus.CounterVec
	Reloaded            *prometheus.CounterVec
	ReloadedByNamespace *prometheus.CounterVec
}

func NewCollectors() Collectors {
@@ -18,15 +20,29 @@ func NewCollectors() Collectors {
			Name: "reload_executed_total",
			Help: "Counter of reloads executed by Reloader.",
		},
		[]string{"success"},
		[]string{
			"success",
		},
	)

	// set 0 as default value
	reloaded.With(prometheus.Labels{"success": "true"}).Add(0)
	reloaded.With(prometheus.Labels{"success": "false"}).Add(0)

	reloaded_by_namespace := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "reloader",
			Name:      "reload_executed_total_by_namespace",
			Help:      "Counter of reloads executed by Reloader by namespace.",
		},
		[]string{
			"success",
			"namespace",
		},
	)
	return Collectors{
		Reloaded: reloaded,
		Reloaded:            reloaded,
		ReloadedByNamespace: reloaded_by_namespace,
	}
}

@@ -34,10 +50,11 @@ func SetupPrometheusEndpoint() Collectors {
	collectors := NewCollectors()
	prometheus.MustRegister(collectors.Reloaded)

	go func() {
		http.Handle("/metrics", promhttp.Handler())
		logrus.Fatal(http.ListenAndServe(":9090", nil))
	}()
	if os.Getenv("METRICS_COUNT_BY_NAMESPACE") == "enabled" {
		prometheus.MustRegister(collectors.ReloadedByNamespace)
	}

	http.Handle("/metrics", promhttp.Handler())

	return collectors
}
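The namespace-scoped counter is opt-in via METRICS_COUNT_BY_NAMESPACE. A usage sketch (label values hypothetical), matching how PerformAction increments both counters on a reload:

os.Setenv("METRICS_COUNT_BY_NAMESPACE", "enabled") // opt in before setup
collectors := metrics.SetupPrometheusEndpoint()
collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
collectors.ReloadedByNamespace.With(prometheus.Labels{
	"success":   "true",
	"namespace": "demo", // hypothetical namespace
}).Inc()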
@@ -3,14 +3,24 @@ package options
import "github.com/stakater/Reloader/internal/pkg/constants"

var (
	// AutoReloadAll reloads all resources when their corresponding configmaps/secrets are updated
	AutoReloadAll = false
	// ConfigmapUpdateOnChangeAnnotation is an annotation to detect changes in
	// configmaps specified by name
	ConfigmapUpdateOnChangeAnnotation = "configmap.reloader.stakater.com/reload"
	// SecretUpdateOnChangeAnnotation is an annotation to detect changes in
	// secrets specified by name
	SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload"
	// ReloaderAutoAnnotation is an annotation to detect changes in secrets
	// ReloaderAutoAnnotation is an annotation to detect changes in secrets/configmaps
	ReloaderAutoAnnotation = "reloader.stakater.com/auto"
	// ConfigmapReloaderAutoAnnotation is an annotation to detect changes in configmaps
	ConfigmapReloaderAutoAnnotation = "configmap.reloader.stakater.com/auto"
	// SecretReloaderAutoAnnotation is an annotation to detect changes in secrets
	SecretReloaderAutoAnnotation = "secret.reloader.stakater.com/auto"
	// ConfigmapExcludeReloaderAnnotation is a comma-separated list of configmaps excluded from change detection
	ConfigmapExcludeReloaderAnnotation = "configmaps.exclude.reloader.stakater.com/reload"
	// SecretExcludeReloaderAnnotation is a comma-separated list of secrets excluded from change detection
	SecretExcludeReloaderAnnotation = "secrets.exclude.reloader.stakater.com/reload"
	// AutoSearchAnnotation is an annotation to detect changes in
	// configmaps or triggers with the SearchMatchAnnotation
	AutoSearchAnnotation = "reloader.stakater.com/search"
@@ -19,10 +29,19 @@ var (
	SearchMatchAnnotation = "reloader.stakater.com/match"
	// LogFormat is the log format to use (json, or empty string for default)
	LogFormat = ""
	// LogLevel is the log level to use (trace, debug, info, warning, error, fatal and panic)
	LogLevel = ""
	// IsArgoRollouts adds support for Argo Rollouts
	IsArgoRollouts = "false"
	// ReloadStrategy specifies the update strategy
	ReloadStrategy = constants.EnvVarsReloadStrategy
	// ReloadOnCreate adds support to watch create events
	ReloadOnCreate = "false"
	// ReloadOnDelete adds support to watch delete events
	ReloadOnDelete = "false"
	SyncAfterRestart = false
	// EnableHA adds support for running multiple replicas via leadership election
	EnableHA = false
	// WebhookUrl is a URL to send a request to instead of triggering a reload
	WebhookUrl = ""
)
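AutoReloadAll above feeds the update condition in PerformAction. Since Go's && binds tighter than ||, that condition parenthesizes as below: the global default only applies when neither auto annotation is set.

// Equivalent parenthesization of the update condition from PerformAction:
shouldReload := reloaderEnabled ||
	typedAutoAnnotationEnabled ||
	(reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll)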
@@ -16,11 +16,13 @@ import (
    "github.com/stakater/Reloader/internal/pkg/callbacks"
    "github.com/stakater/Reloader/internal/pkg/constants"
    "github.com/stakater/Reloader/internal/pkg/crypto"
    "github.com/stakater/Reloader/internal/pkg/metrics"
    "github.com/stakater/Reloader/internal/pkg/options"
    "github.com/stakater/Reloader/internal/pkg/util"
    "github.com/stakater/Reloader/pkg/kube"
    appsv1 "k8s.io/api/apps/v1"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    core_v1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -34,6 +36,19 @@ var (
    SecretResourceType = "secrets"
)

var (
    Clients             = kube.GetClients()
    Pod                 = "test-reloader-" + RandSeq(5)
    Namespace           = "test-reloader-" + RandSeq(5)
    ConfigmapNamePrefix = "testconfigmap-reloader"
    SecretNamePrefix    = "testsecret-reloader"
    Data                = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
    NewData             = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
    UpdatedData         = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy"
    Collectors          = metrics.NewCollectors()
    SleepDuration       = 3 * time.Second
)

// CreateNamespace creates namespace for testing
func CreateNamespace(namespace string, client kubernetes.Interface) {
    _, err := client.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
@@ -54,24 +69,34 @@ func DeleteNamespace(namespace string, client kubernetes.Interface) {
    }
}

func getObjectMeta(namespace string, name string, autoReload bool) metav1.ObjectMeta {
func getObjectMeta(namespace string, name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool) metav1.ObjectMeta {
    return metav1.ObjectMeta{
        Name:        name,
        Namespace:   namespace,
        Labels:      map[string]string{"firstLabel": "temp"},
        Annotations: getAnnotations(name, autoReload),
        Annotations: getAnnotations(name, autoReload, secretAutoReload, configmapAutoReload),
    }
}

func getAnnotations(name string, autoReload bool) map[string]string {
func getAnnotations(name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool) map[string]string {
    annotations := make(map[string]string)
    if autoReload {
        return map[string]string{
            options.ReloaderAutoAnnotation: "true"}
        annotations[options.ReloaderAutoAnnotation] = "true"
    }
    if secretAutoReload {
        annotations[options.SecretReloaderAutoAnnotation] = "true"
    }
    if configmapAutoReload {
        annotations[options.ConfigmapReloaderAutoAnnotation] = "true"
    }

    return map[string]string{
        options.ConfigmapUpdateOnChangeAnnotation: name,
        options.SecretUpdateOnChangeAnnotation:    name}
    if len(annotations) > 0 {
        return annotations
    } else {
        return map[string]string{
            options.ConfigmapUpdateOnChangeAnnotation: name,
            options.SecretUpdateOnChangeAnnotation:    name}
    }
}

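For reference, the reworked getAnnotations maps its flags to annotation sets as follows (derived from the hunk above):

// getAnnotations("app", true, false, false)  => {"reloader.stakater.com/auto": "true"}
// getAnnotations("app", false, true, false)  => {"secret.reloader.stakater.com/auto": "true"}
// getAnnotations("app", false, false, true)  => {"configmap.reloader.stakater.com/auto": "true"}
// getAnnotations("app", false, false, false) => fallback to the update-on-change pair:
//   {"configmap.reloader.stakater.com/reload": "app", "secret.reloader.stakater.com/reload": "app"}
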
func getEnvVarSources(name string) []v1.EnvFromSource {
@@ -317,7 +342,7 @@ func getPodTemplateSpecWithInitContainerAndEnv(name string) v1.PodTemplateSpec {
func GetDeployment(namespace string, deploymentName string) *appsv1.Deployment {
    replicaset := int32(1)
    return &appsv1.Deployment{
        ObjectMeta: getObjectMeta(namespace, deploymentName, false),
        ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false),
        Spec: appsv1.DeploymentSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -336,7 +361,7 @@ func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshi
    replicaset := int32(1)
    podTemplateSpecWithVolume := getPodTemplateSpecWithVolumes(deploymentConfigName)
    return &openshiftv1.DeploymentConfig{
        ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false),
        ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false),
        Spec: openshiftv1.DeploymentConfigSpec{
            Replicas: replicaset,
            Strategy: openshiftv1.DeploymentStrategy{
@@ -351,7 +376,7 @@
func GetDeploymentWithInitContainer(namespace string, deploymentName string) *appsv1.Deployment {
    replicaset := int32(1)
    return &appsv1.Deployment{
        ObjectMeta: getObjectMeta(namespace, deploymentName, false),
        ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false),
        Spec: appsv1.DeploymentSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -369,7 +394,7 @@ func GetDeploymentWithInitContainer(namespace string, deploymentName string) *ap
func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *appsv1.Deployment {
    replicaset := int32(1)
    return &appsv1.Deployment{
        ObjectMeta: getObjectMeta(namespace, deploymentName, true),
        ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false),
        Spec: appsv1.DeploymentSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -386,7 +411,7 @@ func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName strin
func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.Deployment {
    replicaset := int32(1)
    return &appsv1.Deployment{
        ObjectMeta: getObjectMeta(namespace, deploymentName, true),
        ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false),
        Spec: appsv1.DeploymentSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -404,7 +429,7 @@ func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName strin
    replicaset := int32(1)
    podTemplateSpecWithEnvVars := getPodTemplateSpecWithEnvVars(deploymentConfigName)
    return &openshiftv1.DeploymentConfig{
        ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false),
        ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false),
        Spec: openshiftv1.DeploymentConfigSpec{
            Replicas: replicaset,
            Strategy: openshiftv1.DeploymentStrategy{
@@ -418,7 +443,7 @@ func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName strin
func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *appsv1.Deployment {
    replicaset := int32(1)
    return &appsv1.Deployment{
        ObjectMeta: getObjectMeta(namespace, deploymentName, true),
        ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false),
        Spec: appsv1.DeploymentSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -435,7 +460,7 @@ func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *ap
func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment {
    replicaset := int32(1)
    deployment := &appsv1.Deployment{
        ObjectMeta: getObjectMeta(namespace, deploymentName, false),
        ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false),
        Spec: appsv1.DeploymentSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -450,14 +475,69 @@ func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, bo
    if !both {
        deployment.ObjectMeta.Annotations = nil
    }
    deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true)
    deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true, false, false)
    return deployment
}

func GetDeploymentWithTypedAutoAnnotation(namespace string, deploymentName string, resourceType string) *appsv1.Deployment {
    replicaset := int32(1)
    var objectMeta metav1.ObjectMeta
    if resourceType == SecretResourceType {
        objectMeta = getObjectMeta(namespace, deploymentName, false, true, false)
    } else if resourceType == ConfigmapResourceType {
        objectMeta = getObjectMeta(namespace, deploymentName, false, false, true)
    }

    return &appsv1.Deployment{
        ObjectMeta: objectMeta,
        Spec: appsv1.DeploymentSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"secondLabel": "temp"},
            },
            Replicas: &replicaset,
            Strategy: appsv1.DeploymentStrategy{
                Type: appsv1.RollingUpdateDeploymentStrategyType,
            },
            Template: getPodTemplateSpecWithVolumes(deploymentName),
        },
    }
}

func GetDeploymentWithExcludeAnnotation(namespace string, deploymentName string, resourceType string) *appsv1.Deployment {
    replicaset := int32(1)

    annotation := map[string]string{}

    if resourceType == SecretResourceType {
        annotation[options.SecretExcludeReloaderAnnotation] = deploymentName
    } else if resourceType == ConfigmapResourceType {
        annotation[options.ConfigmapExcludeReloaderAnnotation] = deploymentName
    }

    return &appsv1.Deployment{
        ObjectMeta: metav1.ObjectMeta{
            Name:        deploymentName,
            Namespace:   namespace,
            Labels:      map[string]string{"firstLabel": "temp"},
            Annotations: annotation,
        },
        Spec: appsv1.DeploymentSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"secondLabel": "temp"},
            },
            Replicas: &replicaset,
            Strategy: appsv1.DeploymentStrategy{
                Type: appsv1.RollingUpdateDeploymentStrategyType,
            },
            Template: getPodTemplateSpecWithVolumes(deploymentName),
        },
    }
}

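Note that GetDeploymentWithTypedAutoAnnotation only populates objectMeta for the two known resource types; any other resourceType value leaves it zero-valued (no name or namespace), and the API server would reject the resulting Deployment on create, so callers are expected to pass SecretResourceType or ConfigmapResourceType.
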
// GetDaemonSet provides daemonset for testing
func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet {
    return &appsv1.DaemonSet{
        ObjectMeta: getObjectMeta(namespace, daemonsetName, false),
        ObjectMeta: getObjectMeta(namespace, daemonsetName, false, false, false),
        Spec: appsv1.DaemonSetSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -472,7 +552,7 @@ func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet {

func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.DaemonSet {
    return &appsv1.DaemonSet{
        ObjectMeta: getObjectMeta(namespace, daemonSetName, true),
        ObjectMeta: getObjectMeta(namespace, daemonSetName, true, false, false),
        Spec: appsv1.DaemonSetSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -488,7 +568,7 @@ func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.Dae
// GetStatefulSet provides statefulset for testing
func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSet {
    return &appsv1.StatefulSet{
        ObjectMeta: getObjectMeta(namespace, statefulsetName, false),
        ObjectMeta: getObjectMeta(namespace, statefulsetName, false, false, false),
        Spec: appsv1.StatefulSetSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -504,7 +584,7 @@ func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSe
// GetStatefulSetWithEnvVar provides statefulset with an env var for testing
func GetStatefulSetWithEnvVar(namespace string, statefulsetName string) *appsv1.StatefulSet {
    return &appsv1.StatefulSet{
        ObjectMeta: getObjectMeta(namespace, statefulsetName, true),
        ObjectMeta: getObjectMeta(namespace, statefulsetName, true, false, false),
        Spec: appsv1.StatefulSetSpec{
            Selector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -600,7 +680,7 @@ func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
    return last.Hash
}

//ConvertResourceToSHA generates SHA from secret or configmap data
// ConvertResourceToSHA generates SHA from secret or configmap data
func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string {
    values := []string{}
    if resourceType == SecretResourceType {
@@ -714,6 +794,25 @@ func CreateDeploymentWithEnvVarSourceAndAnnotations(client kubernetes.Interface,
    return deployment, err
}

// CreateDeploymentWithTypedAutoAnnotation creates a deployment in the given namespace and returns the Deployment with the typed auto annotation
func CreateDeploymentWithTypedAutoAnnotation(client kubernetes.Interface, deploymentName string, namespace string, resourceType string) (*appsv1.Deployment, error) {
    logrus.Infof("Creating Deployment")
    deploymentClient := client.AppsV1().Deployments(namespace)
    deploymentObj := GetDeploymentWithTypedAutoAnnotation(namespace, deploymentName, resourceType)
    deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
    time.Sleep(3 * time.Second)
    return deployment, err
}

// CreateDeploymentWithExcludeAnnotation creates a deployment in the given namespace and returns the Deployment with the exclude annotation
func CreateDeploymentWithExcludeAnnotation(client kubernetes.Interface, deploymentName string, namespace string, resourceType string) (*appsv1.Deployment, error) {
    logrus.Infof("Creating Deployment")
    deploymentClient := client.AppsV1().Deployments(namespace)
    deploymentObj := GetDeploymentWithExcludeAnnotation(namespace, deploymentName, resourceType)
    deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
    return deployment, err
}

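A sketch of how a test might drive the two helpers above (assuming, as elsewhere in these utilities, that kube.Clients exposes the Kubernetes client as KubernetesClient; names are illustrative):

// Deployment that auto-reloads only on secret changes.
typedDeployment, err := CreateDeploymentWithTypedAutoAnnotation(Clients.KubernetesClient, "typed-app", Namespace, SecretResourceType)
if err != nil {
    logrus.Errorf("failed to create deployment: %v", err)
}
// Deployment whose like-named configmap is excluded from triggering reloads.
excludedDeployment, err := CreateDeploymentWithExcludeAnnotation(Clients.KubernetesClient, "excluded-app", Namespace, ConfigmapResourceType)
if err != nil {
    logrus.Errorf("failed to create deployment: %v", err)
}
_, _ = typedDeployment, excludedDeployment
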
// CreateDaemonSet creates a daemonset in the given namespace and returns the DaemonSet
func CreateDaemonSet(client kubernetes.Interface, daemonsetName string, namespace string, volumeMount bool) (*appsv1.DaemonSet, error) {
    logrus.Infof("Creating DaemonSet")
@@ -822,7 +921,6 @@ func DeleteSecret(client kubernetes.Interface, namespace string, secretName stri

// RandSeq generates a random sequence
func RandSeq(n int) string {
    rand.Seed(time.Now().UnixNano())
    b := make([]rune, n)
    for i := range b {
        b[i] = letters[rand.Intn(len(letters))]
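The rand.Seed call shown above is what this hunk removes: since Go 1.20 the global math/rand source is seeded automatically and rand.Seed is deprecated, so dropping the call keeps RandSeq's behaviour while avoiding the deprecation warning.
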
@@ -835,13 +933,20 @@ func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVar
    items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
    for _, i := range items {
        containers := upgradeFuncs.ContainersFunc(i)
        accessor, err := meta.Accessor(i)
        if err != nil {
            return false
        }
        annotations := accessor.GetAnnotations()
        // match statefulsets with the correct annotation
        annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
        searchAnnotationValue := util.ToObjectMeta(i).Annotations[options.AutoSearchAnnotation]
        reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
        annotationValue := annotations[config.Annotation]
        searchAnnotationValue := annotations[options.AutoSearchAnnotation]
        reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation]
        typedAutoAnnotationEnabledValue := annotations[config.TypedAutoAnnotation]
        reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
        typedAutoAnnotationEnabled, errTyped := strconv.ParseBool(typedAutoAnnotationEnabledValue)
        matches := false
        if err == nil && reloaderEnabled {
        if err == nil && reloaderEnabled || errTyped == nil && typedAutoAnnotationEnabled {
            matches = true
        } else if annotationValue != "" {
            values := strings.Split(annotationValue, ",")
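Since && binds tighter than || in Go, the widened condition groups as below; either the generic auto annotation or the resource-typed one is enough to count as a match:

// Equivalent form with the implicit grouping made explicit:
if (err == nil && reloaderEnabled) || (errTyped == nil && typedAutoAnnotationEnabled) {
    matches = true
}
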
@@ -869,18 +974,74 @@ func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVar
    return false
}

// VerifyResourceEnvVarRemoved verifies whether the rolling upgrade happened or not and all env vars STAKATER_<name>_CONFIGMAP/SECRET are removed
func VerifyResourceEnvVarRemoved(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
    items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
    for _, i := range items {
        containers := upgradeFuncs.ContainersFunc(i)
        accessor, err := meta.Accessor(i)
        if err != nil {
            return false
        }

        annotations := accessor.GetAnnotations()
        // match statefulsets with the correct annotation

        annotationValue := annotations[config.Annotation]
        searchAnnotationValue := annotations[options.AutoSearchAnnotation]
        reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation]
        typedAutoAnnotationEnabledValue := annotations[config.TypedAutoAnnotation]
        reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
        typedAutoAnnotationEnabled, errTyped := strconv.ParseBool(typedAutoAnnotationEnabledValue)

        matches := false
        if err == nil && reloaderEnabled || errTyped == nil && typedAutoAnnotationEnabled {
            matches = true
        } else if annotationValue != "" {
            values := strings.Split(annotationValue, ",")
            for _, value := range values {
                value = strings.Trim(value, " ")
                if value == config.ResourceName {
                    matches = true
                    break
                }
            }
        } else if searchAnnotationValue == "true" {
            if config.ResourceAnnotations[options.SearchMatchAnnotation] == "true" {
                matches = true
            }
        }

        if matches {
            envName := constants.EnvVarPrefix + util.ConvertToEnvVarName(config.ResourceName) + "_" + envVarPostfix
            value := GetResourceSHAFromEnvVar(containers, envName)
            if value == "" {
                return true
            }
        }
    }
    return false
}

// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened or not
func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
    items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
    for _, i := range items {
        podAnnotations := upgradeFuncs.PodAnnotationsFunc(i)
        accessor, err := meta.Accessor(i)
        if err != nil {
            return false
        }
        annotations := accessor.GetAnnotations()
        // match statefulsets with the correct annotation
        annotationValue := util.ToObjectMeta(i).Annotations[config.Annotation]
        searchAnnotationValue := util.ToObjectMeta(i).Annotations[options.AutoSearchAnnotation]
        reloaderEnabledValue := util.ToObjectMeta(i).Annotations[options.ReloaderAutoAnnotation]
        reloaderEnabled, err := strconv.ParseBool(reloaderEnabledValue)
        annotationValue := annotations[config.Annotation]
        searchAnnotationValue := annotations[options.AutoSearchAnnotation]
        reloaderEnabledValue := annotations[options.ReloaderAutoAnnotation]
        typedAutoAnnotationEnabledValue := annotations[config.TypedAutoAnnotation]
        reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue)
        typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue)
        matches := false
        if err == nil && reloaderEnabled {
        if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll {
            matches = true
        } else if annotationValue != "" {
            values := strings.Split(annotationValue, ",")
@@ -906,3 +1067,7 @@ func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, up
    }
    return false
}

func GetSHAfromEmptyData() string {
    return crypto.GenerateSHA("")
}

@@ -12,6 +12,7 @@ type Config struct {
    ResourceName        string
    ResourceAnnotations map[string]string
    Annotation          string
    TypedAutoAnnotation string
    SHAValue            string
    Type                string
}
@@ -23,6 +24,7 @@ func GetConfigmapConfig(configmap *v1.ConfigMap) Config {
    ResourceName:        configmap.Name,
    ResourceAnnotations: configmap.Annotations,
    Annotation:          options.ConfigmapUpdateOnChangeAnnotation,
    TypedAutoAnnotation: options.ConfigmapReloaderAutoAnnotation,
    SHAValue:            GetSHAfromConfigmap(configmap),
    Type:                constants.ConfigmapEnvVarPostfix,
}
@@ -35,6 +37,7 @@ func GetSecretConfig(secret *v1.Secret) Config {
    ResourceName:        secret.Name,
    ResourceAnnotations: secret.Annotations,
    Annotation:          options.SecretUpdateOnChangeAnnotation,
    TypedAutoAnnotation: options.SecretReloaderAutoAnnotation,
    SHAValue:            GetSHAfromSecret(secret.Data),
    Type:                constants.SecretEnvVarPostfix,
}

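With the new field, each per-resource Config carries both annotation keys; a sketch for the secret case (secret is any *v1.Secret):

cfg := GetSecretConfig(secret)
// cfg.Annotation          == options.SecretUpdateOnChangeAnnotation ("secret.reloader.stakater.com/reload")
// cfg.TypedAutoAnnotation == options.SecretReloaderAutoAnnotation   ("secret.reloader.stakater.com/auto")
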
@@ -54,6 +54,8 @@ func GetSHAfromSecret(data map[string][]byte) string {

type List []string

type Map map[string]string

func (l *List) Contains(s string) bool {
    for _, v := range *l {
        if v == s {

okteto.yml
@@ -1,14 +1,17 @@
name: reloader-reloader
image: okteto/golang:1
command: bash
securityContext:
  capabilities:
    add:
      - SYS_PTRACE
volumes:
  - /go/pkg/
  - /root/.cache/go-build/
sync:
  - .:/app
forward:
  - 2345:2345
dev:
  reloader-reloader:
    image: okteto/golang:1
    command: bash
    volumes:
      - /go/pkg/
      - /root/.cache/go-build/
    sync:
      - .:/app
    forward:
      - 2345:2345
    workdir: /app
    autocreate: true
    securityContext:
      capabilities:
        add:
          - SYS_PTRACE

@@ -9,4 +9,5 @@ import (
var ResourceMap = map[string]runtime.Object{
    "configMaps": &v1.ConfigMap{},
    "secrets":    &v1.Secret{},
    "namespaces": &v1.Namespace{},
}

renovate.json (new file)
@@ -0,0 +1,38 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": [
    "config:recommended"
  ],
  "labels": [
    "dependencies"
  ],
  "rebaseWhen": "never",
  "vulnerabilityAlerts": {
    "enabled": true,
    "labels": ["security"]
  },

  "customManagers": [
    {
      "customType": "regex",
      "fileMatch": [
        ".vale.ini"
      ],
      "matchStrings": [
        "https:\/\/github\\.com\/(?<depName>.*)\/releases\/download\/(?<currentValue>.*)\/.*\\.zip"
      ],
      "datasourceTemplate": "github-releases"
    },
    {
      "customType": "regex",
      "description": "Update Helm Chart values file",
      "fileMatch": [
        "values\\.yaml$"
      ],
      "matchStrings": [
        "image:\\s*name: (?<depName>[a-zA-Z0-9\\.\\/]*)\\s*tag: (?<currentValue>[a-zA-Z0-9\\.\\/]*)"
      ],
      "datasourceTemplate": "docker"
    }
  ]
}
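The second custom manager pairs an image name with its tag in the Helm values file. The pattern can be sanity-checked with Go's regexp package; note that Go spells named groups (?P<name>...) while the Renovate config above uses (?<name>...) (the sample values are illustrative):

package main

import (
    "fmt"
    "regexp"
)

func main() {
    // The renovate.json matchString, rewritten with Go's named-group syntax.
    re := regexp.MustCompile(`image:\s*name: (?P<depName>[a-zA-Z0-9\.\/]*)\s*tag: (?P<currentValue>[a-zA-Z0-9\.\/]*)`)
    sample := "image:\n  name: stakater/reloader\n  tag: v1.0.0"
    if m := re.FindStringSubmatch(sample); m != nil {
        fmt.Println(m[1], m[2]) // stakater/reloader v1.0.0
    }
}
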
sonar-project.properties (new file)
@@ -0,0 +1,8 @@
sonar.projectKey=Reloader
sonar.sources=.
sonar.exclusions=**/*_test.go
sonar.language=go

sonar.tests=.
sonar.test.inclusions=**/*_test.go
sonar.analysisCache.enabled=false
Submodule theme_common added at 0eef29f4cb
theme_override/mkdocs.yml (new file)
@@ -0,0 +1,22 @@
site_name: Stakater Reloader
docs_dir: docs
site_url: https://docs.stakater.com/reloader/
repo_url: https://github.com/stakater/reloader
edit_uri: blob/master/docs/

theme:
  favicon: assets/images/favicon.svg

nav:
  - index.md
  - How-to Guides:
      - Verify-Reloader-Working.md
      - Alerting.md
      - Reloader-with-Sealed-Secrets.md
      - Helm2-to-Helm3.md
  - References:
      - How-it-works.md
      - Container Build.md
      - Comparisons with similar tools:
          - Reloader-vs-ConfigmapController.md
          - Reloader-vs-k8s-trigger-controller.md

theme_override/resources/.gitignore (new, empty file)
theme_override/resources/assets/images/favicon.svg (new file, SVG image, 10 KiB; diff suppressed because one or more lines are too long)
ubi-build-files-amd64.txt (new file)
@@ -0,0 +1,10 @@
etc/pki
root/buildinfo
etc/ssl/certs
etc/redhat-release
usr/share/zoneinfo
usr/lib64/ld-linux-x86-64.so.2
usr/lib64/libc.so.6
usr/lib64/libdl.so.2
usr/lib64/libpthread.so.0
usr/lib64/libm.so.6
ubi-build-files-arm64.txt (new file)
@@ -0,0 +1,10 @@
etc/pki
root/buildinfo
etc/ssl/certs
etc/redhat-release
usr/share/zoneinfo
usr/lib/ld-linux-aarch64.so.1
usr/lib64/libc.so.6
usr/lib64/libdl.so.2
usr/lib64/libpthread.so.0
usr/lib64/libm.so.6