Compare commits (mirror of https://github.com/mailcow/mailcow-dockerized.git, synced 2026-02-15 21:10:04 +00:00)
Comparing 2024-01b ... feat/pytho (752 commits)

.github/FUNDING.yml (vendored) | 1
@@ -1 +1,2 @@
 github: mailcow
+custom: ["https://www.servercow.de/mailcow?lang=en#sal"]

.github/ISSUE_TEMPLATE/Bug_report.yml (vendored) | 10
@@ -62,6 +62,16 @@ body:
         - nightly
     validations:
       required: true
+  - type: dropdown
+    attributes:
+      label: "Which architecture are you using?"
+      description: "#### `uname -m`"
+      multiple: false
+      options:
+        - x86
+        - ARM64 (aarch64)
+    validations:
+      required: true
   - type: input
     attributes:
       label: "Operating System:"

.github/ISSUE_TEMPLATE/config.yml (vendored) | 2
@@ -1,7 +1,7 @@
 blank_issues_enabled: false
 contact_links:
   - name: ❓ Community-driven support (Free)
-    url: https://docs.mailcow.email/#get-support
+    url: https://docs.mailcow.email/#community-support-and-chat
     about: Please use the community forum for questions or assistance
   - name: 🔥 Premium Support (Paid)
     url: https://www.servercow.de/mailcow?lang=en#support

.github/ISSUE_TEMPLATE/pr_to_nighty_template.yml (vendored) | 12
@@ -1,13 +1,3 @@
 ## :memo: Brief description
 <!-- Diff summary - START -->
 <!-- Diff summary - END -->
-
-
-## :computer: Commits
-<!-- Diff commits - START -->
-<!-- Diff commits - END -->
-
-
-## :file_folder: Modified files
-<!-- Diff files - START -->
-<!-- Diff files - END -->

.github/PULL_REQUEST_TEMPLATE.md (vendored, new file) | 38
@@ -0,0 +1,38 @@
<!-- _Please make sure to review and check all of these items, otherwise we might refuse your PR:_ -->

## Contribution Guidelines

* [ ] I've read the [contribution guidelines](https://github.com/mailcow/mailcow-dockerized/blob/master/CONTRIBUTING.md) and wholeheartedly agree them

<!-- _NOTE: this tickbox is needed to fullfil on order to get your PR reviewed._ -->

## What does this PR include?

### Short Description

<!-- Please write a short description, what your PR does here. -->

### Affected Containers

<!-- Please list all affected Docker containers here, which you commited changes to -->

<!--

Please list them like this:

- container1
- container2
- container3
etc.

-->

## Did you run tests?

### What did you tested?

<!-- Please write shortly, what you've tested (which components etc.). -->

### What were the final results? (Awaited, got)

<!-- Please write shortly, what your final tests results were. What did you awaited? Was the outcome the awaited one? -->

.github/renovate.json (vendored) | 6
@@ -15,12 +15,6 @@
     "data\/web\/inc\/lib\/vendor\/**"
   ],
   "regexManagers": [
-    {
-      "fileMatch": ["^helper-scripts\/nextcloud.sh$"],
-      "matchStrings": [
-        "#\\srenovate:\\sdatasource=(?<datasource>.*?) depName=(?<depName>.*?)( versioning=(?<versioning>.*?))?( extractVersion=(?<extractVersion>.*?))?\\s.*?_VERSION=(?<currentValue>.*)"
-      ]
-    },
    {
      "fileMatch": ["(^|/)Dockerfile[^/]*$"],
      "matchStrings": [
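
For context, the removed regexManager told Renovate how to find annotated version pins in helper-scripts/nextcloud.sh. A minimal Python sketch of the same match, with Renovate's (?<name>...) groups rewritten as Python's (?P<name>...); the annotated pin used as input is a made-up example, not taken from the repository:

import re

# Python rendition of the removed Renovate matchString; the sample pin below
# is hypothetical, shaped like the annotations the manager used to read.
PATTERN = re.compile(
    r"#\srenovate:\sdatasource=(?P<datasource>.*?) depName=(?P<depName>.*?)"
    r"( versioning=(?P<versioning>.*?))?( extractVersion=(?P<extractVersion>.*?))?"
    r"\s.*?_VERSION=(?P<currentValue>.*)"
)

sample = "# renovate: datasource=github-releases depName=nextcloud/server\nNEXTCLOUD_VERSION=29.0.1"
m = PATTERN.search(sample)
print(m.group("datasource"), m.group("depName"), m.group("currentValue"))
# -> github-releases nextcloud/server 29.0.1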

.github/workflows/check_if_support_labeled.yml (vendored, new file) | 37
@@ -0,0 +1,37 @@
name: Check if labeled support, if so send message and close issue
on:
  issues:
    types:
      - labeled
jobs:
  add-comment:
    if: github.event.label.name == 'support'
    runs-on: ubuntu-latest
    permissions:
      issues: write
    steps:
      - name: Add comment
        run: gh issue comment "$NUMBER" --body "$BODY"
        env:
          GH_TOKEN: ${{ secrets.SUPPORTISSUES_ACTION_PAT }}
          GH_REPO: ${{ github.repository }}
          NUMBER: ${{ github.event.issue.number }}
          BODY: |
            **THIS IS A AUTOMATED MESSAGE!**

            It seems your issue is not a bug.
            Therefore we highly advise you to get support!

            You can get support either by:
            - ordering a paid [support contract at Servercow](https://www.servercow.de/mailcow?lang=en#support/) (Directly from the developers) or
            - using the [community forum](https://community.mailcow.email) (**Based on volunteers! NO guaranteed answer**) or
            - using the [Telegram support channel](https://t.me/mailcow) (**Based on volunteers! NO guaranteed answer**)

            This issue will be closed. If you think your reported issue is not a support case feel free to comment above and if so the issue will reopened.

      - name: Close issue
        env:
          GH_TOKEN: ${{ secrets.SUPPORTISSUES_ACTION_PAT }}
          GH_REPO: ${{ github.repository }}
          NUMBER: ${{ github.event.issue.number }}
        run: gh issue close "$NUMBER" -r "not planned"

@@ -10,9 +10,9 @@ jobs:
     if: github.event.pull_request.base.ref != 'staging' #check if the target branch is not staging
     steps:
       - name: Send message
-        uses: thollander/actions-comment-pull-request@v2.4.3
+        uses: thollander/actions-comment-pull-request@v3.0.1
         with:
-          GITHUB_TOKEN: ${{ secrets.CHECKIFPRISSTAGING_ACTION_PAT }}
+          github-token: ${{ secrets.CHECKIFPRISSTAGING_ACTION_PAT }}
           message: |
             Thanks for contributing!

@@ -14,7 +14,7 @@ jobs:
       pull-requests: write
     steps:
       - name: Mark/Close Stale Issues and Pull Requests 🗑️
-        uses: actions/stale@v9.0.0
+        uses: actions/stale@v9.1.0
        with:
          repo-token: ${{ secrets.STALE_ACTION_PAT }}
          days-before-stale: 60

.github/workflows/image_builds.yml (vendored) | 1
@@ -23,7 +23,6 @@ jobs:
           - "postfix-mailcow"
           - "rspamd-mailcow"
           - "sogo-mailcow"
-          - "solr-mailcow"
           - "unbound-mailcow"
           - "watchdog-mailcow"
     runs-on: ubuntu-latest

.github/workflows/pr_to_nightly.yml (vendored) | 2
@@ -12,7 +12,7 @@ jobs:
         with:
           fetch-depth: 0
       - name: Run the Action
-        uses: devops-infra/action-pull-request@v0.5.5
+        uses: devops-infra/action-pull-request@v0.6.0
        with:
          github_token: ${{ secrets.PRTONIGHTLY_ACTION_PAT }}
          title: Automatic PR to nightly from ${{ github.event.repository.updated_at}}

.github/workflows/rebuild_backup_image.yml (vendored) | 14
@@ -9,6 +9,8 @@ on:
 jobs:
   docker_image_build:
     runs-on: ubuntu-latest
+    permissions:
+      packages: write
     steps:
       - name: Checkout
         uses: actions/checkout@v4
@@ -19,17 +21,19 @@ jobs:
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3

-      - name: Login to Docker Hub
+      - name: Login to GHCR
         if: github.event_name != 'pull_request'
         uses: docker/login-action@v3
         with:
-          username: ${{ secrets.BACKUPIMAGEBUILD_ACTION_DOCKERHUB_USERNAME }}
-          password: ${{ secrets.BACKUPIMAGEBUILD_ACTION_DOCKERHUB_TOKEN }}
+          registry: ghcr.io
+          username: ${{ github.repository_owner }}
+          password: ${{ secrets.GITHUB_TOKEN }}

       - name: Build and push
-        uses: docker/build-push-action@v5
+        uses: docker/build-push-action@v6
         with:
           context: .
           platforms: linux/amd64,linux/arm64
           file: data/Dockerfiles/backup/Dockerfile
           push: true
-          tags: mailcow/backup:latest
+          tags: ghcr.io/mailcow/backup:latest

@@ -22,7 +22,7 @@ jobs:
           bash helper-scripts/update_postscreen_whitelist.sh

       - name: Create Pull Request
-        uses: peter-evans/create-pull-request@v5
+        uses: peter-evans/create-pull-request@v7
        with:
          token: ${{ secrets.mailcow_action_Update_postscreen_access_cidr_pat }}
          commit-message: update postscreen_access.cidr

.gitignore (vendored) | 47
@@ -1,6 +1,3 @@
-!data/conf/nginx/dynmaps.conf
-!data/conf/nginx/meta_exporter.conf
-!data/conf/nginx/site.conf
 !/**/.gitkeep
 *.iml
 .idea
@@ -8,46 +5,33 @@
data/assets/ssl-example/*
data/assets/ssl/*
data/conf/borgmatic/
data/conf/clamav/whitelist.ign2
data/conf/dovecot/acl_anyone
data/conf/dovecot/dovecot-master.passwd
data/conf/dovecot/dovecot-master.userdb
data/conf/clamav/rendered_configs
data/conf/dovecot/extra.conf
data/conf/dovecot/global_sieve_*
data/conf/dovecot/last_login
data/conf/dovecot/lua
data/conf/dovecot/mail_plugins*
data/conf/dovecot/shared_namespace.conf
data/conf/dovecot/sni.conf
data/conf/dovecot/sogo-sso.conf
data/conf/dovecot/sogo_trusted_ip.conf
data/conf/dovecot/sql
data/conf/nextcloud-*.bak
data/conf/nginx/*.active
data/conf/nginx/*.bak
data/conf/nginx/*.conf
data/conf/nginx/*.custom
data/conf/dovecot/rendered_configs
data/conf/nginx/rendered_configs
data/conf/phpfpm/sogo-sso/sogo-sso.pass
data/conf/phpfpm/rendered_configs
data/conf/portainer/
data/conf/postfix/allow_mailcow_local.regexp
data/conf/postfix/custom_postscreen_whitelist.cidr
data/conf/postfix/custom_transport.pcre
data/conf/postfix/extra.cf
data/conf/postfix/sni.map
data/conf/postfix/sni.map.db
data/conf/postfix/sql
data/conf/postfix/dns_blocklists.cf
data/conf/postfix/dnsbl_reply.map
data/conf/postfix/rendered_configs
data/conf/rspamd/custom/*
data/conf/rspamd/local.d/*
data/conf/rspamd/override.d/*
data/conf/rspamd/rendered_configs
data/conf/sogo/custom-theme.js
data/conf/sogo/plist_ldap
data/conf/sogo/sieve.creds
data/conf/sogo/sogo-full.svg
data/conf/sogo/cron.creds
data/conf/sogo/custom-fulllogo.svg
data/conf/sogo/custom-shortlogo.svg
data/conf/sogo/custom-fulllogo.png
data/conf/sogo/rendered_configs
data/conf/mysql/rendered_configs
data/gitea/
data/gogs/
data/hooks/clamd/*
data/hooks/dovecot/*
data/hooks/mariadb/*
data/hooks/nginx/*
data/hooks/phpfpm/*
data/hooks/postfix/*
data/hooks/rspamd/*
@@ -68,3 +52,4 @@ rebuild-images.sh
 refresh_images.sh
 update_diffs/
 create_cold_standby.sh
+!data/conf/nginx/mailcow_auth.conf

@@ -1,33 +1,52 @@
-# Contribution Guidelines (Last modified on 18th December 2023)
+# Contribution Guidelines
+**_Last modified on 15th August 2024_**

 First of all, thank you for wanting to provide a bugfix or a new feature for the mailcow community, it's because of your help that the project can continue to grow!

-## Pull Requests (Last modified on 18th December 2023)
+As we want to keep mailcow's development structured we setup these Guidelines which helps you to create your issue/pull request accordingly.
+
+**PLEASE NOTE, THAT WE MIGHT CLOSE ISSUES/PULL REQUESTS IF THEY DON'T FULLFIL OUR WRITTEN GUIDELINES WRITTEN INSIDE THIS DOCUMENT**. So please check this guidelines before you propose a Issue/Pull Request.
+
+## Topics
+
+- [Pull Requests](#pull-requests)
+- [Issue Reporting](#issue-reporting)
+  - [Guidelines](#issue-reporting-guidelines)
+  - [Issue Report Guide](#issue-report-guide)
+
+## Pull Requests
+**_Last modified on 15th August 2024_**

 However, please note the following regarding pull requests:

 1. **ALWAYS** create your PR using the staging branch of your locally cloned mailcow instance, as the pull request will end up in said staging branch of mailcow once approved. Ideally, you should simply create a new branch for your pull request that is named after the type of your PR (e.g. `feat/` for function updates or `fix/` for bug fixes) and the actual content (e.g. `sogo-6.0.0` for an update from SOGo to version 6 or `html-escape` for a fix that includes escaping HTML in mailcow).
-2. Please **keep** this pull request branch **clean** and free of commits that have nothing to do with the changes you have made (e.g. commits from other users from other branches). *If you make changes to the `update.sh` script or other scripts that trigger a commit, there is usually a developer mode for clean working in this case.
-3. **Test your changes before you commit them as a pull request.** <ins>If possible</ins>, write a small **test log** or demonstrate the functionality with a **screenshot or GIF**. *We will of course also test your pull request ourselves, but proof from you will save us the question of whether you have tested your own changes yourself.*
-4. Please **ALWAYS** create the actual pull request against the staging branch and **NEVER** directly against the master branch. *If you forget to do this, our moobot will remind you to switch the branch to staging.*
-5. Wait for a merge commit: It may happen that we do not accept your pull request immediately or sometimes not at all for various reasons. Please do not be disappointed if this is the case. We always endeavor to incorporate any meaningful changes from the community into the mailcow project.
-6. If you are planning larger and therefore more complex pull requests, it would be advisable to first announce this in a separate issue and then start implementing it after the idea has been accepted in order to avoid unnecessary frustration and effort!
+2. **ALWAYS** report/request issues/features in the english language, even though mailcow is a german based company. This is done to allow other GitHub users to reply to your issues/requests too which did not speak german or other languages besides english.
+3. Please **keep** this pull request branch **clean** and free of commits that have nothing to do with the changes you have made (e.g. commits from other users from other branches). *If you make changes to the `update.sh` script or other scripts that trigger a commit, there is usually a developer mode for clean working in this case.*
+4. **Test your changes before you commit them as a pull request.** <ins>If possible</ins>, write a small **test log** or demonstrate the functionality with a **screenshot or GIF**. *We will of course also test your pull request ourselves, but proof from you will save us the question of whether you have tested your own changes yourself.*
+5. **Please use** the pull request template we provide once creating a pull request. *HINT: During editing you encounter comments which looks like: `<!-- CONTENT -->`. These can be removed or kept, as they will not rendered later on GitHub! Please only create actual content without the said comments.*
+6. Please **ALWAYS** create the actual pull request against the staging branch and **NEVER** directly against the master branch. *If you forget to do this, our moobot will remind you to switch the branch to staging.*
+7. Wait for a merge commit: It may happen that we do not accept your pull request immediately or sometimes not at all for various reasons. Please do not be disappointed if this is the case. We always endeavor to incorporate any meaningful changes from the community into the mailcow project.
+8. If you are planning larger and therefore more complex pull requests, it would be advisable to first announce this in a separate issue and then start implementing it after the idea has been accepted in order to avoid unnecessary frustration and effort!

 ---

-## Issue Reporting (Last modified on 18th December 2023)
+## Issue Reporting
+**_Last modified on 15th August 2024_**

 If you plan to report a issue within mailcow please read and understand the following rules:

+### Issue Reporting Guidelines
+
 1. **ONLY** use the issue tracker for bug reports or improvement requests and NOT for support questions. For support questions you can either contact the [mailcow community on Telegram](https://docs.mailcow.email/#community-support-and-chat) or the mailcow team directly in exchange for a [support fee](https://docs.mailcow.email/#commercial-support).
 2. **ONLY** report an error if you have the **necessary know-how (at least the basics)** for the administration of an e-mail server and the usage of Docker. mailcow is a complex and fully-fledged e-mail server including groupware components on a Docker basement and it requires a bit of technical know-how for debugging and operating.
-3. **ONLY** report bugs that are contained in the latest mailcow release series. *The definition of the latest release series includes the last major patch (e.g. 2023-12) and all minor patches (revisions) below it (e.g. 2023-12a, b, c etc.).* New issue reports published starting from January 1, 2024 must meet this criterion, as versions below the latest releases are no longer supported by us.
-4. When reporting a problem, please be as detailed as possible and include even the smallest changes to your mailcow installation. Simply fill out the corresponding bug report form in detail and accurately to minimize possible questions.
-5. **Before you open an issue/feature request**, please first check whether a similar request already exists in the mailcow tracker on GitHub. If so, please include yourself in this request.
-6. When you create a issue/feature request: Please note that the creation does <ins>**not guarantee an instant implementation or fix by the mailcow team or the community**</ins>.
-7. Please **ALWAYS** anonymize any sensitive information in your bug report or feature request before submitting it.
+3. **ALWAYS** report/request issues/features in the english language, even though mailcow is a german based company. This is done to allow other GitHub users to reply to your issues/requests too which did not speak german or other languages besides english.
+4. **ONLY** report bugs that are contained in the latest mailcow release series. *The definition of the latest release series includes the last major patch (e.g. 2023-12) and all minor patches (revisions) below it (e.g. 2023-12a, b, c etc.).* New issue reports published starting from January 1, 2024 must meet this criterion, as versions below the latest releases are no longer supported by us.
+5. When reporting a problem, please be as detailed as possible and include even the smallest changes to your mailcow installation. Simply fill out the corresponding bug report form in detail and accurately to minimize possible questions.
+6. **Before you open an issue/feature request**, please first check whether a similar request already exists in the mailcow tracker on GitHub. If so, please include yourself in this request.
+7. When you create a issue/feature request: Please note that the creation does <ins>**not guarantee an instant implementation or fix by the mailcow team or the community**</ins>.
+8. Please **ALWAYS** anonymize any sensitive information in your bug report or feature request before submitting it.

-### Quick guide to reporting problems:
+### Issue Report Guide
 1. Read your logs; follow them to see what the reason for your problem is.
 2. Follow the leads given to you in your logfiles and start investigating.
 3. Restarting the troubled service or the whole stack to see if the problem persists.
@@ -36,4 +55,4 @@ If you plan to report a issue within mailcow please read and understand the foll
 6. [Create an issue](https://github.com/mailcow/mailcow-dockerized/issues/new/choose) over at our GitHub repository if you think your problem might be a bug or a missing feature you badly need. But please make sure, that you include **all the logs** and a full description to your problem.
 7. Ask your questions in our community-driven [support channels](https://docs.mailcow.email/#community-support-and-chat).

-## When creating an issue/feature request or a pull request, you will be asked to confirm these guidelines.
+## When creating an issue/feature request or a pull request, you will be asked to confirm these guidelines.

README.md | 16
@@ -13,6 +13,22 @@ You can also [get a SAL](https://www.servercow.de/mailcow?lang=en#sal) which is

 Or just spread the word: moo.

+## Many thanks to our GitHub Sponsors ❤️
+A big thank you to everyone supporting us on GitHub Sponsors—your contributions mean the world to us! Special thanks to the following amazing supporters:
+
+### 100$/Month Sponsors
+<a href="https://www.colba.net/" target=_blank><img
+src="https://avatars.githubusercontent.com/u/204464723" height="58"
+/></a>
+<a href="https://www.maehdros.com/" target=_blank><img
+src="https://avatars.githubusercontent.com/u/173894712" height="58"
+/></a>
+
+### 50$/Month Sponsors
+<a href="https://github.com/vnukhr" target=_blank><img
+src="https://avatars.githubusercontent.com/u/7805987?s=52&v=4" height="58"
+/></a>
+
 ## Info, documentation and support

 Please see [the official documentation](https://docs.mailcow.email/) for installation and support instructions. 🐄

@@ -1,8 +1,7 @@
-FROM alpine:3.18
+FROM alpine:3.21

-LABEL maintainer "The Infrastructure Company GmbH GmbH <info@servercow.de>"
+LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"

-ARG PIP_BREAK_SYSTEM_PACKAGES=1
 RUN apk upgrade --no-cache \
   && apk add --update --no-cache \
   bash \
@@ -15,9 +14,7 @@ RUN apk upgrade --no-cache \
   tini \
   tzdata \
   python3 \
-  py3-pip \
-  && pip3 install --upgrade pip \
-  && pip3 install acme-tiny
+  acme-tiny

 COPY acme.sh /srv/acme.sh
 COPY functions.sh /srv/functions.sh

@@ -4,9 +4,9 @@ exec 5>&1

 # Do not attempt to write to slave
 if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
-  export REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
+  export REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning"
 else
-  export REDIS_CMDLINE="redis-cli -h redis -p 6379"
+  export REDIS_CMDLINE="redis-cli -h redis -p 6379 -a ${REDISPASS} --no-auth-warning"
 fi

 until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
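
The rule encoded here (reads go to the local redis service, writes go to the master named by REDIS_SLAVEOF_IP, and every call now authenticates with REDISPASS) is the same split that get_redis_config() in the new bootstrap's main.py, further down, captures as read_host/write_host. A minimal sketch of turning those settings into two clients, assuming redis-py is available:

import os
import redis

# Reads from the local instance, writes to the master when REDIS_SLAVEOF_IP
# is set; mirrors the REDIS_CMDLINE selection above (sketch, not shipped code).
def make_redis_clients():
    password = os.getenv("REDISPASS")
    read_host = os.getenv("REDIS_HOST", "redis")
    write_host = os.getenv("REDIS_SLAVEOF_IP") or read_host
    write_port = int(os.getenv("REDIS_SLAVEOF_PORT") or 6379)
    reader = redis.Redis(host=read_host, port=6379, password=password, db=0)
    writer = redis.Redis(host=write_host, port=write_port, password=password, db=0)
    return reader, writer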

@@ -33,6 +33,10 @@ if [[ "${ONLY_MAILCOW_HOSTNAME}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
   ONLY_MAILCOW_HOSTNAME=y
 fi

+if [[ "${AUTODISCOVER_SAN}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
+  AUTODISCOVER_SAN=y
+fi
+
 # Request individual certificate for every domain
 if [[ "${ENABLE_SSL_SNI}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
   ENABLE_SSL_SNI=y

@@ -113,13 +117,13 @@ fi
 chmod 600 ${ACME_BASE}/key.pem

 log_f "Waiting for database..."
-while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent > /dev/null; do
+while ! /usr/bin/mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent > /dev/null; do
   sleep 2
 done
 log_f "Database OK"

 log_f "Waiting for Nginx..."
-until $(curl --output /dev/null --silent --head --fail http://nginx:8081); do
+until $(curl --output /dev/null --silent --head --fail http://nginx.${COMPOSE_PROJECT_NAME}_mailcow-network:8081); do
   sleep 2
 done
 log_f "Nginx OK"
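
For comparison, the same wait-until-ready idea expressed with mysql.connector, which the new bootstrap modules import; a sketch that assumes a db_config dict shaped like the one get_mysql_config() in main.py builds:

import time
import mysql.connector

# Poll until a connection succeeds, like the mariadb-admin loop above
# (illustrative sketch; not part of this changeset).
def wait_for_database(db_config, interval=2):
    while True:
        try:
            mysql.connector.connect(
                host=db_config["host"],
                user=db_config["user"],
                password=db_config["password"],
                database=db_config["database"],
                connection_timeout=2,
            ).close()
            return
        except mysql.connector.Error:
            time.sleep(interval)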

@@ -133,8 +137,8 @@ log_f "Resolver OK"
 # Waiting for domain table
 log_f "Waiting for domain table..."
 while [[ -z ${DOMAIN_TABLE} ]]; do
-  curl --silent http://nginx/ >/dev/null 2>&1
-  DOMAIN_TABLE=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SHOW TABLES LIKE 'domain'" -Bs)
+  curl --silent http://nginx.${COMPOSE_PROJECT_NAME}_mailcow-network/ >/dev/null 2>&1
+  DOMAIN_TABLE=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SHOW TABLES LIKE 'domain'" -Bs)
   [[ -z ${DOMAIN_TABLE} ]] && sleep 10
 done
 log_f "OK" no_date

@@ -211,7 +215,11 @@ while true; do
       ADDITIONAL_SAN_ARR+=($i)
     fi
   done

+  if [[ ${AUTODISCOVER_SAN} == "y" ]]; then
+    # Fetch certs for autoconfig and autodiscover subdomains
+    ADDITIONAL_WC_ARR+=('autodiscover' 'autoconfig')
+  fi
+
   if [[ ${SKIP_IP_CHECK} != "y" ]]; then
     # Start IP detection

@@ -223,7 +231,7 @@ while true; do

   #########################################
   # IP and webroot challenge verification #
-  SQL_DOMAINS=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain FROM domain WHERE backupmx=0 and active=1" -Bs)
+  SQL_DOMAINS=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain FROM domain WHERE backupmx=0 and active=1" -Bs)
   if [[ ! $? -eq 0 ]]; then
     log_f "Failed to read SQL domains, retrying in 1 minute..."
     sleep 1m

@@ -124,7 +124,7 @@ case "$SUCCESS" in
     ;;
   *) # non-zero is non-fun
     log_f "Failed to obtain certificate ${CERT} for domains '${CERT_DOMAINS[*]}'"
-    redis-cli -h redis SET ACME_FAIL_TIME "$(date +%s)"
+    redis-cli -h redis -a ${REDISPASS} --no-auth-warning SET ACME_FAIL_TIME "$(date +%s)"
     exit 100${SUCCESS}
     ;;
 esac

@@ -2,32 +2,32 @@

 # Reading container IDs
 # Wrapping as array to ensure trimmed content when calling $NGINX etc.
-NGINX=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"nginx-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
-DOVECOT=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"dovecot-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
-POSTFIX=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
+NGINX=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"nginx-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
+DOVECOT=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"dovecot-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
+POSTFIX=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))

 reload_nginx(){
   echo "Reloading Nginx..."
-  NGINX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi/containers/${NGINX}/exec -d '{"cmd":"reload", "task":"nginx"}' --silent -H 'Content-type: application/json' | jq -r .type)
+  NGINX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${NGINX}/exec -d '{"cmd":"reload", "task":"nginx"}' --silent -H 'Content-type: application/json' | jq -r .type)
   [[ ${NGINX_RELOAD_RET} != 'success' ]] && { echo "Could not reload Nginx, restarting container..."; restart_container ${NGINX} ; }
 }

 reload_dovecot(){
   echo "Reloading Dovecot..."
-  DOVECOT_RELOAD_RET=$(curl -X POST --insecure https://dockerapi/containers/${DOVECOT}/exec -d '{"cmd":"reload", "task":"dovecot"}' --silent -H 'Content-type: application/json' | jq -r .type)
+  DOVECOT_RELOAD_RET=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${DOVECOT}/exec -d '{"cmd":"reload", "task":"dovecot"}' --silent -H 'Content-type: application/json' | jq -r .type)
   [[ ${DOVECOT_RELOAD_RET} != 'success' ]] && { echo "Could not reload Dovecot, restarting container..."; restart_container ${DOVECOT} ; }
 }

 reload_postfix(){
   echo "Reloading Postfix..."
-  POSTFIX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi/containers/${POSTFIX}/exec -d '{"cmd":"reload", "task":"postfix"}' --silent -H 'Content-type: application/json' | jq -r .type)
+  POSTFIX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${POSTFIX}/exec -d '{"cmd":"reload", "task":"postfix"}' --silent -H 'Content-type: application/json' | jq -r .type)
   [[ ${POSTFIX_RELOAD_RET} != 'success' ]] && { echo "Could not reload Postfix, restarting container..."; restart_container ${POSTFIX} ; }
 }

 restart_container(){
   for container in $*; do
     echo "Restarting ${container}..."
-    C_REST_OUT=$(curl -X POST --insecure https://dockerapi/containers/${container}/restart --silent | jq -r '.msg')
+    C_REST_OUT=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${container}/restart --silent | jq -r '.msg')
     echo "${C_REST_OUT}"
   done
 }
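
All three reload helpers drive the same dockerapi call: POST /containers/<id>/exec with a JSON body naming the task, success being reported in the .type field. A hedged sketch of that call in Python with requests (an assumption; this branch keeps the curl versions, and verify=False plays the role of curl --insecure):

import os
import requests

BASE = f"https://dockerapi.{os.environ['COMPOSE_PROJECT_NAME']}_mailcow-network"

def reload_service(container_id, task):
    # POST /containers/<id>/exec with {"cmd": "reload", "task": <service>},
    # the same endpoint the curl calls above use.
    r = requests.post(
        f"{BASE}/containers/{container_id}/exec",
        json={"cmd": "reload", "task": task},
        verify=False,
        timeout=10,
    )
    return r.json().get("type") == "success"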

@@ -1,3 +1,3 @@
-FROM debian:bullseye-slim
+FROM debian:bookworm-slim

-RUN apt update && apt install pigz
+RUN apt update && apt install pigz -y --no-install-recommends

data/Dockerfiles/bootstrap/main.py (new file) | 66
@@ -0,0 +1,66 @@
import os
import sys
import signal
import ipaddress

def handle_sigterm(signum, frame):
    print("Received SIGTERM, exiting gracefully...")
    sys.exit(0)

def get_mysql_config(service_name):
    db_config = {
        "user": os.getenv("DBUSER") or os.getenv("MYSQL_USER"),
        "password": os.getenv("DBPASS") or os.getenv("MYSQL_PASSWORD"),
        "database": os.getenv("DBNAME") or os.getenv("MYSQL_DATABASE"),
        "connection_timeout": 2,
        "service_table": "service_settings",
        "service_types": [service_name]
    }

    db_host = os.getenv("DB_HOST")
    if db_host.startswith("/"):
        db_config["host"] = "localhost"
        db_config["unix_socket"] = db_host
    else:
        db_config["host"] = db_host

    return db_config

def get_redis_config():
    redis_config = {
        "read_host": os.getenv("REDIS_HOST"),
        "read_port": 6379,
        "write_host": os.getenv("REDIS_SLAVEOF_IP") or os.getenv("REDIS_HOST"),
        "write_port": int(os.getenv("REDIS_SLAVEOF_PORT") or 6379),
        "password": os.getenv("REDISPASS"),
        "db": 0
    }

    return redis_config

def main():
    signal.signal(signal.SIGTERM, handle_sigterm)

    container_name = os.getenv("CONTAINER_NAME")
    service_name = container_name.replace("-mailcow", "").replace("-", "")
    module_name = f"Bootstrap{service_name.capitalize()}"

    try:
        mod = __import__(f"modules.{module_name}", fromlist=[module_name])
        Bootstrap = getattr(mod, module_name)
    except (ImportError, AttributeError) as e:
        print(f"Failed to load bootstrap module for: {container_name} → {module_name}")
        print(str(e))
        sys.exit(1)

    b = Bootstrap(
        container=container_name,
        service=service_name,
        db_config=get_mysql_config(service_name),
        redis_config=get_redis_config()
    )

    b.bootstrap()

if __name__ == "__main__":
    main()
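
The dynamic import above turns a container name into a module name with nothing but string operations: strip the -mailcow suffix, drop remaining dashes, capitalize, prefix with Bootstrap. A small self-contained illustration of that mapping (no mailcow code is imported here; the names on the right are what __import__ would be asked to load):

def module_for(container_name: str) -> str:
    # Same derivation as main.py's main()
    service = container_name.replace("-mailcow", "").replace("-", "")
    return f"Bootstrap{service.capitalize()}"

for name in ("dovecot-mailcow", "postfix-mailcow", "clamd-mailcow"):
    print(name, "->", module_for(name))
# dovecot-mailcow -> BootstrapDovecot
# postfix-mailcow -> BootstrapPostfix
# clamd-mailcow -> BootstrapClamd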
827
data/Dockerfiles/bootstrap/modules/BootstrapBase.py
Normal file
827
data/Dockerfiles/bootstrap/modules/BootstrapBase.py
Normal file
@@ -0,0 +1,827 @@
|
||||
import os
|
||||
import pwd
|
||||
import grp
|
||||
import shutil
|
||||
import secrets
|
||||
import string
|
||||
import subprocess
|
||||
import time
|
||||
import socket
|
||||
import re
|
||||
import redis
|
||||
import hashlib
|
||||
import json
|
||||
import psutil
|
||||
import signal
|
||||
from urllib.parse import quote
|
||||
from pathlib import Path
|
||||
import dns.resolver
|
||||
import mysql.connector
|
||||
|
||||
class BootstrapBase:
|
||||
def __init__(self, container, service, db_config, redis_config):
|
||||
self.container = container
|
||||
self.service = service
|
||||
self.db_config = db_config
|
||||
self.redis_config = redis_config
|
||||
|
||||
self.env = None
|
||||
self.env_vars = None
|
||||
self.mysql_conn = None
|
||||
self.redis_connr = None
|
||||
self.redis_connw = None
|
||||
|
||||
def render_config(self, config_dir):
|
||||
"""
|
||||
Renders multiple Jinja2 templates from a config.json file in a given directory.
|
||||
|
||||
Args:
|
||||
config_dir (str or Path): Path to the directory containing config.json
|
||||
|
||||
Behavior:
|
||||
- Renders each template defined in config.json
|
||||
- Writes the result to the specified output path
|
||||
- Also copies the rendered file to: <config_dir>/rendered_configs/<relative_output_path>
|
||||
"""
|
||||
|
||||
config_dir = Path(config_dir)
|
||||
config_path = config_dir / "config.json"
|
||||
|
||||
if not config_path.exists():
|
||||
print(f"config.json not found in: {config_dir}")
|
||||
return
|
||||
|
||||
with config_path.open("r") as f:
|
||||
entries = json.load(f)
|
||||
|
||||
for entry in entries:
|
||||
template_name = entry["template"]
|
||||
output_path = Path(entry["output"])
|
||||
clean_blank_lines = entry.get("clean_blank_lines", False)
|
||||
if_not_exists = entry.get("if_not_exists", False)
|
||||
|
||||
if if_not_exists and output_path.exists():
|
||||
print(f"Skipping {output_path} (already exists)")
|
||||
continue
|
||||
|
||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
try:
|
||||
template = self.env.get_template(template_name)
|
||||
except Exception as e:
|
||||
print(f"Template not found: {template_name} ({e})")
|
||||
continue
|
||||
|
||||
rendered = template.render(self.env_vars)
|
||||
|
||||
if clean_blank_lines:
|
||||
rendered = "\n".join(line for line in rendered.splitlines() if line.strip())
|
||||
|
||||
rendered = rendered.replace('\r\n', '\n').replace('\r', '\n')
|
||||
|
||||
with output_path.open("w") as f:
|
||||
f.write(rendered)
|
||||
|
||||
rendered_copy_path = config_dir / "rendered_configs" / output_path.name
|
||||
rendered_copy_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
self.copy_file(output_path, rendered_copy_path)
|
||||
|
||||
print(f"Rendered {template_name} → {output_path}")
|
||||
|
||||
def prepare_template_vars(self, overwrite_path, extra_vars = None):
|
||||
"""
|
||||
Loads and merges environment variables for Jinja2 templates from multiple sources, and registers custom template filters.
|
||||
|
||||
This method combines variables from:
|
||||
1. System environment variables
|
||||
2. The MySQL `service_settings` table (filtered by service type if defined)
|
||||
3. An optional `extra_vars` dictionary
|
||||
4. A JSON overwrite file (if it exists at the given path)
|
||||
|
||||
Also registers custom Jinja2 filters.
|
||||
|
||||
Args:
|
||||
overwrite_path (str or Path): Path to a JSON file containing key-value overrides.
|
||||
extra_vars (dict, optional): Additional variables to merge into the environment.
|
||||
|
||||
Returns:
|
||||
dict: A dictionary containing all resolved template variables.
|
||||
|
||||
Raises:
|
||||
Prints errors if database fetch or JSON parsing fails, but does not raise exceptions.
|
||||
"""
|
||||
|
||||
# 1. setup filters
|
||||
self.env.filters['sha1'] = self.sha1_filter
|
||||
self.env.filters['urlencode'] = self.urlencode_filter
|
||||
self.env.filters['escape_quotes'] = self.escape_quotes_filter
|
||||
|
||||
# 2. Load env vars
|
||||
env_vars = dict(os.environ)
|
||||
|
||||
# 3. Load from MySQL
|
||||
try:
|
||||
cursor = self.mysql_conn.cursor()
|
||||
|
||||
if self.db_config['service_types']:
|
||||
placeholders = ','.join(['%s'] * len(self.db_config['service_types']))
|
||||
sql = f"SELECT `key`, `value` FROM {self.db_config['service_table']} WHERE `type` IN ({placeholders})"
|
||||
cursor.execute(sql, self.db_config['service_types'])
|
||||
else:
|
||||
cursor.execute(f"SELECT `key`, `value` FROM {self.db_config['service_table']}")
|
||||
|
||||
for key, value in cursor.fetchall():
|
||||
env_vars[key] = value
|
||||
|
||||
cursor.close()
|
||||
except Exception as e:
|
||||
print(f"Failed to fetch DB service settings: {e}")
|
||||
|
||||
# 4. Load extra vars
|
||||
if extra_vars:
|
||||
env_vars.update(extra_vars)
|
||||
|
||||
# 5. Load overwrites
|
||||
overwrite_path = Path(overwrite_path)
|
||||
if overwrite_path.exists():
|
||||
try:
|
||||
with overwrite_path.open("r") as f:
|
||||
overwrite_data = json.load(f)
|
||||
env_vars.update(overwrite_data)
|
||||
except Exception as e:
|
||||
print(f"Failed to parse overwrites: {e}")
|
||||
|
||||
return env_vars
|
||||
|
||||
def set_timezone(self):
|
||||
"""
|
||||
Sets the system timezone based on the TZ environment variable.
|
||||
|
||||
If the TZ variable is set, writes its value to /etc/timezone.
|
||||
"""
|
||||
|
||||
timezone = os.getenv("TZ")
|
||||
if timezone:
|
||||
with open("/etc/timezone", "w") as f:
|
||||
f.write(timezone + "\n")
|
||||
|
||||
def set_syslog_redis(self):
|
||||
"""
|
||||
Reconfigures syslog-ng to use a Redis slave configuration.
|
||||
|
||||
If the REDIS_SLAVEOF_IP environment variable is set, replaces the syslog-ng config
|
||||
with the Redis slave-specific config.
|
||||
"""
|
||||
|
||||
redis_slave_ip = os.getenv("REDIS_SLAVEOF_IP")
|
||||
if redis_slave_ip:
|
||||
shutil.copy("/etc/syslog-ng/syslog-ng-redis_slave.conf", "/etc/syslog-ng/syslog-ng.conf")
|
||||
|
||||
def rsync_file(self, src, dst, recursive=False, owner=None, mode=None):
|
||||
"""
|
||||
Copies files or directories using rsync, with optional ownership and permissions.
|
||||
|
||||
Args:
|
||||
src (str or Path): Source file or directory.
|
||||
dst (str or Path): Destination directory.
|
||||
recursive (bool): If True, copies contents recursively.
|
||||
owner (tuple): Tuple of (user, group) to set ownership.
|
||||
mode (int): File mode (e.g., 0o644) to set permissions after sync.
|
||||
"""
|
||||
|
||||
src_path = Path(src)
|
||||
dst_path = Path(dst)
|
||||
dst_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
rsync_cmd = ["rsync", "-a"]
|
||||
if recursive:
|
||||
rsync_cmd.append(str(src_path) + "/")
|
||||
else:
|
||||
rsync_cmd.append(str(src_path))
|
||||
rsync_cmd.append(str(dst_path))
|
||||
|
||||
try:
|
||||
subprocess.run(rsync_cmd, check=True)
|
||||
except Exception as e:
|
||||
print(f"Rsync failed: {e}")
|
||||
|
||||
if owner:
|
||||
self.set_owner(dst_path, *owner, recursive=True)
|
||||
if mode:
|
||||
self.set_permissions(dst_path, mode)
|
||||
|
||||
    def set_permissions(self, path, mode):
        """
        Sets file or directory permissions.

        Args:
            path (str or Path): Path to the file or directory.
            mode (int): File mode to apply, e.g., 0o644.

        Raises:
            FileNotFoundError: If the path does not exist.
        """

        file_path = Path(path)
        if not file_path.exists():
            raise FileNotFoundError(f"Cannot chmod: {file_path} does not exist")
        os.chmod(file_path, mode)

    def set_owner(self, path, user, group=None, recursive=False):
        """
        Changes ownership of a file or directory.

        Args:
            path (str or Path): Path to the file or directory.
            user (str or int): Username or UID for new owner.
            group (str or int, optional): Group name or GID; defaults to user's group if not provided.
            recursive (bool): If True and path is a directory, ownership is applied recursively.

        Raises:
            FileNotFoundError: If the path does not exist.
        """

        # Resolve UID
        uid = int(user) if str(user).isdigit() else pwd.getpwnam(user).pw_uid
        # Resolve GID
        if group is not None:
            gid = int(group) if str(group).isdigit() else grp.getgrnam(group).gr_gid
        else:
            gid = uid if isinstance(user, int) or str(user).isdigit() else grp.getgrnam(user).gr_gid

        p = Path(path)
        if not p.exists():
            raise FileNotFoundError(f"{path} does not exist")

        if recursive and p.is_dir():
            for sub_path in p.rglob("*"):
                os.chown(sub_path, uid, gid)
        os.chown(p, uid, gid)

    def fix_permissions(self, path, user=None, group=None, mode=None, recursive=False):
        """
        Sets owner and/or permissions on a file or directory.

        Args:
            path (str or Path): Target path.
            user (str|int, optional): Username or UID.
            group (str|int, optional): Group name or GID.
            mode (int, optional): File mode (e.g. 0o644).
            recursive (bool): Apply recursively if path is a directory.
        """

        if user or group:
            self.set_owner(path, user, group, recursive)
        if mode:
            self.set_permissions(path, mode)
    def move_file(self, src, dst, overwrite=True):
        """
        Moves a file from src to dst, optionally overwriting existing files.

        Args:
            src (str or Path): Source file path.
            dst (str or Path): Destination path.
            overwrite (bool): If False, raises error if dst exists.

        Raises:
            FileNotFoundError: If the source file does not exist.
            FileExistsError: If the destination file exists and overwrite is False.
        """

        src_path = Path(src)
        dst_path = Path(dst)

        if not src_path.exists():
            raise FileNotFoundError(f"Source file does not exist: {src}")

        dst_path.parent.mkdir(parents=True, exist_ok=True)

        if dst_path.exists() and not overwrite:
            raise FileExistsError(f"Destination already exists: {dst} (set overwrite=True to overwrite)")

        shutil.move(str(src_path), str(dst_path))

    def copy_file(self, src, dst, overwrite=True):
        """
        Copies a file from src to dst using shutil.

        Args:
            src (str or Path): Source file path.
            dst (str or Path): Destination file path.
            overwrite (bool): Whether to overwrite the destination if it exists.

        Raises:
            FileNotFoundError: If the source file doesn't exist.
            FileExistsError: If the destination exists and overwrite is False.
            IOError: If the copy operation fails.
        """

        src_path = Path(src)
        dst_path = Path(dst)

        if not src_path.is_file():
            raise FileNotFoundError(f"Source file not found: {src_path}")

        if dst_path.exists() and not overwrite:
            raise FileExistsError(f"Destination exists: {dst_path}")

        dst_path.parent.mkdir(parents=True, exist_ok=True)

        shutil.copy2(src_path, dst_path)
    def remove(self, path, recursive=False, wipe_contents=False, exclude=None):
        """
        Removes a file or directory with optional exclusion logic.

        Args:
            path (str or Path): The file or directory path to remove.
            recursive (bool): If True, directories will be removed recursively.
            wipe_contents (bool): If True and path is a directory, only its contents are removed, not the dir itself.
            exclude (list[str], optional): List of filenames to exclude from deletion.

        Raises:
            FileNotFoundError: If the path does not exist.
            ValueError: If a directory is passed without recursive or wipe_contents.
        """

        path = Path(path)
        exclude = set(exclude or [])

        if not path.exists():
            raise FileNotFoundError(f"Cannot remove: {path} does not exist")

        if wipe_contents and path.is_dir():
            for child in path.iterdir():
                if child.name in exclude:
                    continue
                if child.is_dir():
                    shutil.rmtree(child)
                else:
                    child.unlink()
        elif path.is_file():
            if path.name not in exclude:
                path.unlink()
        elif path.is_dir():
            if recursive:
                shutil.rmtree(path)
            else:
                raise ValueError(f"{path} is a directory. Use recursive=True or wipe_contents=True to remove it.")
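    # Usage sketch: wipe a directory's contents but keep a marker file, as the
    # PHP-FPM bootstrap below does for its template cache:
    #
    #   self.remove("/web/templates/cache", wipe_contents=True,
    #               exclude=[".gitkeep"])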
    def create_dir(self, path):
        """
        Creates a directory if it does not exist.

        If the directory is missing, it will be created along with any necessary parent directories.

        Args:
            path (str or Path): The directory path to create.
        """

        dir_path = Path(path)
        if not dir_path.exists():
            print(f"Creating directory: {dir_path}")
            dir_path.mkdir(parents=True, exist_ok=True)
    def patch_exists(self, target_file, patch_file, reverse=False):
        """
        Checks whether a patch can be applied (or reversed) to a target file.

        Args:
            target_file (str): File to test the patch against.
            patch_file (str): Patch file to apply.
            reverse (bool): If True, checks whether the patch can be reversed.

        Returns:
            bool: True if patch is applicable, False otherwise.
        """

        cmd = ["patch", "-sfN", "--dry-run", target_file, "<", patch_file]
        if reverse:
            cmd.insert(1, "-R")
        try:
            result = subprocess.run(
                " ".join(cmd),
                shell=True,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL
            )
            return result.returncode == 0
        except Exception as e:
            print(f"Patch dry-run failed: {e}")
            return False
    def apply_patch(self, target_file, patch_file, reverse=False):
        """
        Applies a patch file to a target file.

        Args:
            target_file (str): File to be patched.
            patch_file (str): Patch file containing the diff.
            reverse (bool): If True, applies the patch in reverse (rollback).

        Logs:
            Success or failure of the patching operation.
        """

        cmd = ["patch", target_file, "<", patch_file]
        if reverse:
            # "-R" goes right after the binary name: patch -R <target> < <patch_file>
            cmd.insert(1, "-R")
        try:
            subprocess.run(" ".join(cmd), shell=True, check=True)
            print(f"Applied patch {'(reverse)' if reverse else ''} to {target_file}")
        except subprocess.CalledProcessError as e:
            print(f"Patch failed: {e}")
    def isYes(self, value):
        """
        Determines whether a given value represents a "yes"-like answer.

        Args:
            value: Input to evaluate; coerced to str so non-string defaults
                (e.g. a False fallback from os.getenv) are handled safely.

        Returns:
            bool: True if value is "yes" or "y" (case-insensitive), otherwise False.
        """
        return str(value).strip().lower() in ["yes", "y"]
    def is_port_open(self, host, port):
        """
        Checks whether a TCP port is open on a given host.

        Args:
            host (str): The hostname or IP address to check.
            port (int): The TCP port number to test.

        Returns:
            bool: True if the port is open and accepting connections, False otherwise.
        """

        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(1)
            result = sock.connect_ex((host, port))
            return result == 0
    def resolve_docker_dns_record(self, hostname, record_type="A"):
        """
        Resolves DNS A or AAAA records for a given hostname.

        Args:
            hostname (str): The domain to query.
            record_type (str): "A" for IPv4, "AAAA" for IPv6. Default is "A".

        Returns:
            list[str]: A list of resolved IP addresses.

        Raises:
            Exception: If resolution fails or no results are found.
        """

        try:
            resolver = dns.resolver.Resolver()
            resolver.nameservers = ["127.0.0.11"]
            answers = resolver.resolve(hostname, record_type)
            return [answer.to_text() for answer in answers]
        except Exception as e:
            raise Exception(f"Failed to resolve {record_type} record for {hostname}: {e}")
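    # Sketch: 127.0.0.11 is Docker's embedded DNS resolver, so inside a
    # compose network this resolves service names to container addresses:
    #
    #   ips = self.resolve_docker_dns_record("dovecot-mailcow", "A")
    #   # -> e.g. ["172.22.1.250"] (the address depends on your network)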
    def kill_proc(self, process_name):
        """
        Sends SIGTERM to all running processes matching the given name.

        Args:
            process_name (str): Name of the process to terminate.

        Returns:
            int: Number of processes successfully signaled.
        """

        killed = 0
        for proc in psutil.process_iter(['name']):
            try:
                if proc.info['name'] == process_name:
                    proc.send_signal(signal.SIGTERM)
                    killed += 1
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                continue
        return killed
    def connect_mysql(self, socket=None):
        """
        Establishes a connection to the MySQL database using the provided configuration.

        Continuously retries the connection until the database is reachable. Stores
        the connection in `self.mysql_conn` once successful.

        Logs:
            Connection status and retry errors to stdout.

        Args:
            socket (str, optional): Custom UNIX socket path to override the default.
        """

        print("Connecting to MySQL...")
        config = {
            "host": self.db_config['host'],
            "user": self.db_config['user'],
            "password": self.db_config['password'],
            "database": self.db_config['database'],
            "connection_timeout": self.db_config['connection_timeout']
        }
        if self.db_config['unix_socket']:
            config["unix_socket"] = socket or self.db_config['unix_socket']

        while True:
            try:
                self.mysql_conn = mysql.connector.connect(**config)
                if self.mysql_conn.is_connected():
                    print("MySQL is up and ready!")
                    break
            except mysql.connector.Error as e:
                print(f"Waiting for MySQL... ({e})")
                time.sleep(2)
    def close_mysql(self):
        """
        Closes the MySQL connection if it's currently open and connected.

        Safe to call even if the connection has already been closed.
        """

        if self.mysql_conn and self.mysql_conn.is_connected():
            self.mysql_conn.close()
    def connect_redis(self, max_retries=10, delay=2):
        """
        Connects to both read and write Redis servers and stores the connections.

        Read server: tries indefinitely until successful.
        Write server: tries up to `max_retries` before giving up.

        Sets:
            self.redis_connr: Redis client for read
            self.redis_connw: Redis client for write
        """

        # A separate write connection is only needed when the read and write
        # endpoints differ; if they match, one client serves both roles.
        use_rw = self.redis_config['read_host'] != self.redis_config['write_host'] or self.redis_config['read_port'] != self.redis_config['write_port']

        if use_rw:
            print("Connecting to Redis read server...")
        else:
            print("Connecting to Redis server...")

        while True:
            try:
                clientr = redis.Redis(
                    host=self.redis_config['read_host'],
                    port=self.redis_config['read_port'],
                    password=self.redis_config['password'],
                    db=self.redis_config['db'],
                    decode_responses=True
                )
                if clientr.ping():
                    self.redis_connr = clientr
                    print("Redis read server is up and ready!")
                    if use_rw:
                        break
                    else:
                        self.redis_connw = clientr
                        return
            except redis.RedisError as e:
                print(f"Waiting for Redis read... ({e})")
                time.sleep(delay)

        print("Connecting to Redis write server...")
        for attempt in range(max_retries):
            try:
                clientw = redis.Redis(
                    host=self.redis_config['write_host'],
                    port=self.redis_config['write_port'],
                    password=self.redis_config['password'],
                    db=self.redis_config['db'],
                    decode_responses=True
                )
                if clientw.ping():
                    self.redis_connw = clientw
                    print("Redis write server is up and ready!")
                    return
            except redis.RedisError as e:
                print(f"Waiting for Redis write... (attempt {attempt + 1}/{max_retries}) ({e})")
                time.sleep(delay)
        print("Redis write server is unreachable.")
    def close_redis(self):
        """
        Closes the Redis read/write connections if open.
        """

        if self.redis_connr:
            try:
                self.redis_connr.close()
            except Exception as e:
                print(f"Error while closing Redis read connection: {e}")
            finally:
                self.redis_connr = None

        if self.redis_connw:
            try:
                self.redis_connw.close()
            except Exception as e:
                print(f"Error while closing Redis write connection: {e}")
            finally:
                self.redis_connw = None
    def wait_for_schema_update(self, init_file_path="init_db.inc.php", check_interval=5):
        """
        Waits until the current database schema version matches the expected version
        defined in a PHP initialization file.

        Compares the `version` value in the `versions` table for `application = 'db_schema'`
        with the `$db_version` value extracted from the specified PHP file.

        Args:
            init_file_path (str): Path to the PHP file containing the expected version string.
            check_interval (int): Time in seconds to wait between version checks.

        Logs:
            Current vs. expected schema versions until they match.
        """

        print("Checking database schema version...")

        while True:
            current_version = self._get_current_db_version()
            expected_version = self._get_expected_schema_version(init_file_path)

            if current_version == expected_version:
                print(f"DB schema is up to date: {current_version}")
                break

            print(f"Waiting for schema update... (DB: {current_version}, Expected: {expected_version})")
            time.sleep(check_interval)
    def wait_for_host(self, host, retry_interval=1.0, count=1):
        """
        Waits for a host to respond to ICMP ping.

        Args:
            host (str): Hostname or IP to ping.
            retry_interval (float): Seconds to wait between pings.
            count (int): Number of ping packets to send per check (default 1).
        """
        while True:
            try:
                result = subprocess.run(
                    ["ping", "-c", str(count), host],
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL
                )
                if result.returncode == 0:
                    print(f"{host} is reachable via ping.")
                    break
            except Exception:
                pass
            print(f"Waiting for {host}...")
            time.sleep(retry_interval)
    def wait_for_dns(self, domain, retry_interval=1, timeout=30):
        """
        Waits until the domain resolves via DNS using pure Python (socket).

        Args:
            domain (str): The domain to resolve.
            retry_interval (int): Time (seconds) to wait between attempts.
            timeout (int): Maximum total wait time (seconds).

        Returns:
            bool: True if resolved, False if timed out.
        """

        start = time.time()
        while True:
            try:
                socket.gethostbyname(domain)
                print(f"{domain} is resolving via DNS.")
                return True
            except socket.gaierror:
                pass

            if time.time() - start > timeout:
                print(f"DNS resolution for {domain} timed out.")
                return False

            print(f"Waiting for DNS for {domain}...")
            time.sleep(retry_interval)
    def _get_current_db_version(self):
        """
        Fetches the current schema version from the database.

        Executes a SELECT query on the `versions` table where `application = 'db_schema'`.

        Returns:
            str or None: The current schema version as a string, or None if not found or on error.

        Logs:
            Error message if the query fails.
        """

        try:
            cursor = self.mysql_conn.cursor()
            cursor.execute("SELECT version FROM versions WHERE application = 'db_schema'")
            result = cursor.fetchone()
            cursor.close()
            return result[0] if result else None
        except Exception as e:
            print(f"Error fetching current DB schema version: {e}")
            return None
    def _get_expected_schema_version(self, filepath):
        """
        Extracts the expected database schema version from a PHP initialization file.

        Looks for a line in the form of: `$db_version = "..."` and extracts the version string.

        Args:
            filepath (str): Path to the PHP file containing the `$db_version` definition.

        Returns:
            str or None: The extracted version string, or None if not found or on error.

        Logs:
            Error message if the file cannot be read or parsed.
        """

        try:
            with open(filepath, "r") as f:
                content = f.read()
            match = re.search(r'\$db_version\s*=\s*"([^"]+)"', content)
            if match:
                return match.group(1)
        except Exception as e:
            print(f"Error reading expected schema version from {filepath}: {e}")
        return None
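    # Sketch: the regex above matches a PHP assignment such as
    #
    #   $db_version = "14062025_1221";
    #
    # (the version string here is a made-up example) and returns the quoted
    # value as a plain Python string.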
    def rand_pass(self, length=22):
        """
        Generates a secure random password using allowed characters.

        Allowed characters include upper/lowercase letters, digits, underscores, and hyphens.

        Args:
            length (int): Length of the password to generate. Default is 22.

        Returns:
            str: A securely generated random password string.
        """

        allowed_chars = string.ascii_letters + string.digits + "_-"
        return ''.join(secrets.choice(allowed_chars) for _ in range(length))
    def run_command(self, command, check=True, shell=False, input_stream=None, log_output=True):
        """
        Executes a shell command and optionally logs output.

        Args:
            command (str or list): Command to run.
            check (bool): Raise if non-zero exit.
            shell (bool): Run in shell.
            input_stream: stdin stream.
            log_output (bool): If True, print output.

        Returns:
            subprocess.CompletedProcess
        """
        try:
            result = subprocess.run(
                command,
                shell=shell,
                check=check,
                stdin=input_stream,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True
            )
            if log_output:
                if result.stdout:
                    print(result.stdout.strip())
                if result.stderr:
                    print(result.stderr.strip())
            return result
        except subprocess.CalledProcessError as e:
            print(f"Command failed with exit code {e.returncode}: {e.cmd}")
            print(e.stderr.strip())
            if check:
                raise
            return e
    def sha1_filter(self, value):
        """Template filter helper: returns the hex SHA-1 digest of a string."""
        return hashlib.sha1(value.encode()).hexdigest()

    def urlencode_filter(self, value):
        """Template filter helper: percent-encodes a string, including '/' and ':'."""
        return quote(value, safe='')

    def escape_quotes_filter(self, value):
        """Template filter helper: backslash-escapes double quotes in a string."""
        return value.replace('"', r'\"')
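    # Usage sketch (assumption: these helpers are registered as Jinja2 filters
    # elsewhere in the class, e.g. self.env.filters["sha1"] = self.sha1_filter;
    # the registration is not shown in this diff). A template could then render:
    #
    #   {{ env_vars.DBPASS | sha1 }}
    #   {{ env_vars.MAILCOW_HOSTNAME | urlencode }}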
60
data/Dockerfiles/bootstrap/modules/BootstrapClamd.py
Normal file
@@ -0,0 +1,60 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
from pathlib import Path
import os
import sys
import time

class BootstrapClamd(BootstrapBase):
    def bootstrap(self):
        # Skip Clamd if set: park the container instead of starting ClamAV
        if self.isYes(os.getenv("SKIP_CLAMD", "")):
            print("SKIP_CLAMD is set, skipping ClamAV startup...")
            time.sleep(365 * 24 * 60 * 60)
            sys.exit(1)

        # Connect to MySQL
        self.connect_mysql()

        print("Cleaning up tmp files...")
        tmp_files = Path("/var/lib/clamav").glob("clamav-*.tmp")
        for tmp_file in tmp_files:
            try:
                self.remove(tmp_file)
                print(f"Removed: {tmp_file}")
            except Exception as e:
                print(f"Failed to remove {tmp_file}: {e}")

        self.create_dir("/run/clamav")
        self.create_dir("/var/lib/clamav")

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Render config")
        self.render_config("/service_config")

        # Fix permissions
        self.set_owner("/var/lib/clamav", "clamav", "clamav", recursive=True)
        self.set_owner("/run/clamav", "clamav", "clamav", recursive=True)
        self.set_permissions("/var/lib/clamav", 0o755)
        for item in Path("/var/lib/clamav").glob("*"):
            self.set_permissions(item, 0o644)
        self.set_permissions("/run/clamav", 0o750)

        # Copying to /etc/clamav to expose file as-is to administrator
        self.copy_file("/var/lib/clamav/whitelist.ign2", "/etc/clamav/whitelist.ign2")
289
data/Dockerfiles/bootstrap/modules/BootstrapDovecot.py
Normal file
@@ -0,0 +1,289 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
from pathlib import Path
import os
import pwd
import hashlib

class BootstrapDovecot(BootstrapBase):
    def bootstrap(self):
        # Connect to MySQL
        self.connect_mysql()
        self.wait_for_schema_update()

        # Connect to Redis
        self.connect_redis()
        if self.redis_connw:
            self.redis_connw.set("DOVECOT_REPL_HEALTH", 1)

        # Wait for DNS
        self.wait_for_dns("mailcow.email")

        # Create missing directories
        self.create_dir("/etc/dovecot/sql/")
        self.create_dir("/etc/dovecot/auth/")
        self.create_dir("/var/vmail/_garbage")
        self.create_dir("/var/vmail/sieve")
        self.create_dir("/etc/sogo")
        self.create_dir("/var/volatile")

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
            "VALID_CERT_DIRS": self.get_valid_cert_dirs(),
            "RAND_USER": self.rand_pass(),
            "RAND_PASS": self.rand_pass(),
            "RAND_PASS2": self.rand_pass(),
            "ENV_VARS": dict(os.environ)
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Render config")
        self.render_config("/service_config")

        files = [
            "/etc/dovecot/mail_plugins",
            "/etc/dovecot/mail_plugins_imap",
            "/etc/dovecot/mail_plugins_lmtp",
            "/templates/quarantine.tpl"
        ]
        for file in files:
            self.set_permissions(file, 0o644)

        try:
            # Migrate old sieve_after file
            self.move_file("/etc/dovecot/sieve_after", "/var/vmail/sieve/global_sieve_after.sieve")
        except Exception:
            pass
        try:
            # Cleanup random user maildirs
            self.remove("/var/vmail/mailcow.local", wipe_contents=True)
        except Exception:
            pass
        try:
            # Cleanup PIDs
            self.remove("/tmp/quarantine_notify.pid")
        except Exception:
            pass
        try:
            self.remove("/var/run/dovecot/master.pid")
        except Exception:
            pass

        # Check permissions of vmail/index/garbage directories.
        # Do not do this on every start-up, it may take a very long time. So we use a stat check here.
        files = [
            "/var/vmail",
            "/var/vmail/_garbage",
            "/var/vmail_index"
        ]
        for file in files:
            path = Path(file)
            try:
                stat_info = path.stat()
                current_user = pwd.getpwuid(stat_info.st_uid).pw_name

                if current_user != "vmail":
                    print(f"Ownership of {path} is {current_user}, fixing to vmail:vmail...")
                    self.set_owner(path, user="vmail", group="vmail", recursive=True)
                else:
                    print(f"Ownership of {path} is already correct (vmail)")
            except Exception as e:
                print(f"Error checking ownership of {path}: {e}")

        # Compile sieve scripts
        files = [
            "/var/vmail/sieve/global_sieve_before.sieve",
            "/var/vmail/sieve/global_sieve_after.sieve",
            "/usr/lib/dovecot/sieve/report-spam.sieve",
            "/usr/lib/dovecot/sieve/report-ham.sieve",
        ]
        for file in files:
            self.run_command(["sievec", file], check=False)

        # Fix permissions
        for path in Path("/etc/dovecot/sql").glob("*.conf"):
            self.set_owner(path, "root", "root")
            self.set_permissions(path, 0o640)

        files = [
            "/etc/dovecot/auth/passwd-verify.lua",
            *Path("/etc/dovecot/sql").glob("dovecot-dict-sql-sieve*"),
            *Path("/etc/dovecot/sql").glob("dovecot-dict-sql-quota*")
        ]
        for file in files:
            self.set_owner(file, "root", "dovecot")

        self.set_permissions("/etc/dovecot/auth/passwd-verify.lua", 0o640)

        for file in ["/var/vmail/sieve", "/var/volatile", "/var/vmail_index"]:
            self.set_owner(file, "vmail", "vmail", recursive=True)

        self.run_command(["adduser", "vmail", "tty"])
        self.run_command(["chmod", "g+rw", "/dev/console"])
        self.set_owner("/dev/console", "root", "tty")
        files = [
            "/usr/lib/dovecot/sieve/rspamd-pipe-ham",
            "/usr/lib/dovecot/sieve/rspamd-pipe-spam",
            "/usr/local/bin/imapsync_runner.pl",
            "/usr/local/bin/imapsync",
            "/usr/local/bin/trim_logs.sh",
            "/usr/local/bin/sa-rules.sh",
            "/usr/local/bin/clean_q_aged.sh",
            "/usr/local/bin/maildir_gc.sh",
            "/usr/local/sbin/stop-supervisor.sh",
            "/usr/local/bin/quota_notify.py",
            "/usr/local/bin/repl_health.sh",
            "/usr/local/bin/optimize-fts.sh"
        ]
        for file in files:
            self.set_permissions(file, 0o755)

        # Collect SA rules once now
        self.run_command(["/usr/local/bin/sa-rules.sh"], check=False)

        self.generate_mail_crypt_keys()
        self.cleanup_imapsync_jobs()
        self.generate_guid_version()
    def get_valid_cert_dirs(self):
        """
        Returns a mapping of domains to their certificate directory path.

        Example:
            {
                "example.com": "/etc/ssl/mail/example.com/",
                "www.example.com": "/etc/ssl/mail/example.com/"
            }
        """
        sni_map = {}
        base_path = Path("/etc/ssl/mail")
        if not base_path.exists():
            return sni_map

        for cert_dir in base_path.iterdir():
            if not cert_dir.is_dir():
                continue

            domains_file = cert_dir / "domains"
            cert_file = cert_dir / "cert.pem"
            key_file = cert_dir / "key.pem"

            if not (domains_file.exists() and cert_file.exists() and key_file.exists()):
                continue

            with open(domains_file, "r") as f:
                domains = [line.strip() for line in f if line.strip()]
            for domain in domains:
                sni_map[domain] = str(cert_dir)

        return sni_map
    def generate_mail_crypt_keys(self):
        """
        Ensures mail_crypt EC keypair exists. Generates if missing. Adjusts permissions.
        """

        key_dir = Path("/mail_crypt")
        priv_key = key_dir / "ecprivkey.pem"
        pub_key = key_dir / "ecpubkey.pem"

        # Generate keys if they don't exist or are empty
        if not priv_key.exists() or priv_key.stat().st_size == 0 or \
           not pub_key.exists() or pub_key.stat().st_size == 0:
            self.run_command(
                "openssl ecparam -name prime256v1 -genkey | openssl pkey -out /mail_crypt/ecprivkey.pem",
                shell=True
            )
            self.run_command(
                "openssl pkey -in /mail_crypt/ecprivkey.pem -pubout -out /mail_crypt/ecpubkey.pem",
                shell=True
            )

        # Set ownership to UID 401 (dovecot)
        self.set_owner(priv_key, user='401')
        self.set_owner(pub_key, user='401')
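    # Sketch: the two run_command calls above are the equivalent of running,
    # by hand,
    #
    #   openssl ecparam -name prime256v1 -genkey | openssl pkey -out ecprivkey.pem
    #   openssl pkey -in ecprivkey.pem -pubout -out ecpubkey.pem
    #
    # i.e. generate a P-256 (prime256v1) private key, then derive its public half.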
    def cleanup_imapsync_jobs(self):
        """
        Cleans up stale imapsync locks and resets running status in the database.

        Deletes the imapsync_busy.lock file if present and sets `is_running` to 0
        in the `imapsync` table, if it exists.

        Logs:
            Any issues with file operations or SQL execution.
        """

        lock_file = Path("/tmp/imapsync_busy.lock")
        if lock_file.exists():
            try:
                lock_file.unlink()
            except Exception as e:
                print(f"Failed to remove lock file: {e}")

        try:
            cursor = self.mysql_conn.cursor()
            cursor.execute("SHOW TABLES LIKE 'imapsync'")
            result = cursor.fetchone()
            if result:
                cursor.execute("UPDATE imapsync SET is_running='0'")
                self.mysql_conn.commit()
            cursor.close()
        except Exception as e:
            print(f"Error updating imapsync table: {e}")
    def generate_guid_version(self):
        """
        Waits for the `versions` table to be created, then generates a GUID
        based on the mail hostname and Dovecot's public key and inserts it
        into the `versions` table.

        If the key or hash is missing or malformed, marks it as INVALID.
        """

        try:
            result = self.run_command(["doveconf", "-P"], check=True, log_output=False)
            pubkey_path = None
            for line in result.stdout.splitlines():
                if "mail_crypt_global_public_key" in line:
                    parts = line.split('<')
                    if len(parts) > 1:
                        pubkey_path = parts[1].strip()
                    break

            if pubkey_path and Path(pubkey_path).exists():
                with open(pubkey_path, "rb") as key_file:
                    pubkey_data = key_file.read()

                hostname = self.env_vars.get("MAILCOW_HOSTNAME", "mailcow.local").encode("utf-8")
                concat = hostname + pubkey_data
                guid = hashlib.sha256(concat).hexdigest()

                if len(guid) == 64:
                    version_value = guid
                else:
                    version_value = "INVALID"

                cursor = self.mysql_conn.cursor()
                cursor.execute(
                    "REPLACE INTO versions (application, version) VALUES (%s, %s)",
                    ("GUID", version_value)
                )
                self.mysql_conn.commit()
                cursor.close()
            else:
                print("Public key not found or unreadable. GUID not generated.")
        except Exception as e:
            print(f"Failed to generate or store GUID: {e}")
163
data/Dockerfiles/bootstrap/modules/BootstrapMysql.py
Normal file
@@ -0,0 +1,163 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
import os
import time
import subprocess

class BootstrapMysql(BootstrapBase):
    def bootstrap(self):
        dbuser = "root"
        dbpass = os.getenv("MYSQL_ROOT_PASSWORD", "")
        socket = "/tmp/mysql-temp.sock"

        # Check if mysql has been initialized
        if os.path.exists("/var/lib/mysql/mysql/db.frm"):
            print("Starting temporary mysqld for upgrade...")
            self.start_temporary(socket)

            self.connect_mysql(socket)

            print("Running mysql_upgrade...")
            self.upgrade_mysql(dbuser, dbpass, socket)
            print("Checking timezone support with CONVERT_TZ...")
            self.check_and_import_timezone_support(dbuser, dbpass, socket)

            print("Shutting down temporary mysqld...")
            self.close_mysql()
            self.stop_temporary(dbuser, dbpass, socket)

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Render config")
        self.render_config("/service_config")
    def start_temporary(self, socket):
        """
        Starts a temporary mysqld process in the background using the given UNIX socket.

        The server is started with networking disabled (--skip-networking).

        Args:
            socket (str): Path to the UNIX socket file for MySQL to listen on.

        Returns:
            subprocess.Popen: The running mysqld process object.
        """

        return subprocess.Popen([
            "mysqld",
            "--user=mysql",
            "--skip-networking",
            f"--socket={socket}"
        ])
    def stop_temporary(self, dbuser, dbpass, socket):
        """
        Shuts down the temporary mysqld instance gracefully.

        Uses mariadb-admin to issue a shutdown command to the running server.

        Args:
            dbuser (str): The MySQL username with shutdown privileges (typically 'root').
            dbpass (str): The password for the MySQL user.
            socket (str): Path to the UNIX socket the server is listening on.
        """

        self.run_command([
            "mariadb-admin",
            "shutdown",
            f"--socket={socket}",
            "-u", dbuser,
            f"-p{dbpass}"
        ])
    def upgrade_mysql(self, dbuser, dbpass, socket, max_retries=5, wait_interval=3):
        """
        Executes mysql_upgrade to check and fix any schema or table incompatibilities.

        Retries the upgrade command if it fails, up to a maximum number of attempts.

        Args:
            dbuser (str): MySQL username with privilege to perform the upgrade.
            dbpass (str): Password for the MySQL user.
            socket (str): Path to the MySQL UNIX socket for local communication.
            max_retries (int): Maximum number of attempts before giving up. Default is 5.
            wait_interval (int): Number of seconds to wait between retries. Default is 3.

        Returns:
            bool: True if upgrade succeeded, False if all attempts failed.
        """

        retries = 0
        while retries < max_retries:
            result = self.run_command([
                "mysql_upgrade",
                "-u", dbuser,
                f"-p{dbpass}",
                f"--socket={socket}"
            ], check=False)

            if result.returncode == 0:
                print("mysql_upgrade completed successfully.")
                return True
            else:
                print(f"mysql_upgrade failed (try {retries+1}/{max_retries})")
                retries += 1
                time.sleep(wait_interval)

        print("mysql_upgrade failed after all retries.")
        return False
    def check_and_import_timezone_support(self, dbuser, dbpass, socket):
        """
        Checks if MySQL supports timezone conversion (CONVERT_TZ).
        If not, it imports timezone info using mysql_tzinfo_to_sql piped into mariadb.
        """

        try:
            cursor = self.mysql_conn.cursor()
            cursor.execute("SELECT CONVERT_TZ('2019-11-02 23:33:00','Europe/Berlin','UTC')")
            result = cursor.fetchone()
            cursor.close()

            if not result or result[0] is None:
                print("Timezone conversion failed or returned NULL. Importing timezone info...")

                # Use mysql_tzinfo_to_sql piped into mariadb
                tz_dump = subprocess.Popen(
                    ["mysql_tzinfo_to_sql", "/usr/share/zoneinfo"],
                    stdout=subprocess.PIPE
                )

                self.run_command([
                    "mariadb",
                    "--socket", socket,
                    "-u", dbuser,
                    f"-p{dbpass}",
                    "mysql"
                ], input_stream=tz_dump.stdout)

                tz_dump.stdout.close()
                tz_dump.wait()

                print("Timezone info successfully imported.")
            else:
                print(f"Timezone support is working. Sample result: {result[0]}")
        except Exception as e:
            print(f"Failed to verify or import timezone info: {e}")
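    # Sketch: what the probe above looks like from a mariadb client. A NULL
    # result means the mysql.time_zone* tables are empty and need the import:
    #
    #   SELECT CONVERT_TZ('2019-11-02 23:33:00','Europe/Berlin','UTC');
    #   -- NULL                  -> import needed
    #   -- '2019-11-02 22:33:00' -> timezone tables are populated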
65
data/Dockerfiles/bootstrap/modules/BootstrapNginx.py
Normal file
@@ -0,0 +1,65 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
import os

class BootstrapNginx(BootstrapBase):
    def bootstrap(self):
        # Connect to MySQL
        self.connect_mysql()

        # Wait for hosts
        php_service = os.getenv("PHPFPM_HOST") or "php-fpm-mailcow"
        rspamd_service = os.getenv("RSPAMD_HOST") or "rspamd-mailcow"
        sogo_service = os.getenv("SOGO_HOST")
        self.wait_for_host(php_service)
        if not self.isYes(os.getenv("SKIP_RSPAMD", "")):
            self.wait_for_host(rspamd_service)
        if not self.isYes(os.getenv("SKIP_SOGO", "")):
            self.wait_for_host(sogo_service)

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
            "VALID_CERT_DIRS": self.get_valid_cert_dirs(),
            "TRUSTED_PROXIES": [item.strip() for item in os.getenv("TRUSTED_PROXIES", "").split(",") if item.strip()],
            "ADDITIONAL_SERVER_NAMES": [item.strip() for item in os.getenv("ADDITIONAL_SERVER_NAMES", "").split(",") if item.strip()],
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Render config")
        self.render_config("/service_config")
    def get_valid_cert_dirs(self):
        ssl_dir = '/etc/ssl/mail/'
        valid_cert_dirs = []
        for d in os.listdir(ssl_dir):
            full_path = os.path.join(ssl_dir, d)
            if not os.path.isdir(full_path):
                continue

            cert_path = os.path.join(full_path, 'cert.pem')
            key_path = os.path.join(full_path, 'key.pem')
            domains_path = os.path.join(full_path, 'domains')

            if os.path.isfile(cert_path) and os.path.isfile(key_path) and os.path.isfile(domains_path):
                with open(domains_path, 'r') as file:
                    domains = file.read().strip()
                domains_list = domains.split()
                if domains_list and os.getenv("MAILCOW_HOSTNAME", "") not in domains_list:
                    valid_cert_dirs.append({
                        'cert_path': full_path + '/',
                        'domains': domains
                    })

        return valid_cert_dirs
202
data/Dockerfiles/bootstrap/modules/BootstrapPhpfpm.py
Normal file
@@ -0,0 +1,202 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
import os
import ipaddress

class BootstrapPhpfpm(BootstrapBase):
    def bootstrap(self):
        self.connect_mysql()
        self.connect_redis()

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        # Prepare Redis and MySQL Database
        # TODO: move to dockerapi
        if self.isYes(os.getenv("MASTER", "")):
            print("We are master, preparing...")
            self.prepare_redis()
            self.setup_apikeys(
                os.getenv("API_ALLOW_FROM", "").strip(),
                os.getenv("API_KEY", "").strip(),
                os.getenv("API_KEY_READ_ONLY", "").strip()
            )
            self.setup_mysql_events()

        print("Render config")
        self.render_config("/service_config")

        self.copy_file("/usr/local/etc/php/conf.d/opcache-recommended.ini", "/php-conf/opcache-recommended.ini")
        self.copy_file("/usr/local/etc/php-fpm.d/z-pools.conf", "/php-conf/pools.conf")
        self.copy_file("/usr/local/etc/php/conf.d/zzz-other.ini", "/php-conf/other.ini")
        self.copy_file("/usr/local/etc/php/conf.d/upload.ini", "/php-conf/upload.ini")
        self.copy_file("/usr/local/etc/php/conf.d/session_store.ini", "/php-conf/session_store.ini")

        self.set_owner("/global_sieve", 82, 82, recursive=True)
        self.set_owner("/web/templates/cache", 82, 82, recursive=True)
        self.remove("/web/templates/cache", wipe_contents=True, exclude=[".gitkeep"])

        print("Running DB init...")
        self.run_command(["php", "-c", "/usr/local/etc/php", "-f", "/web/inc/init_db.inc.php"], check=False)
    def prepare_redis(self):
        print("Setting default Redis keys if missing...")

        # Q_RELEASE_FORMAT
        if self.redis_connw and self.redis_connr.get("Q_RELEASE_FORMAT") is None:
            self.redis_connw.set("Q_RELEASE_FORMAT", "raw")

        # Q_MAX_AGE
        if self.redis_connw and self.redis_connr.get("Q_MAX_AGE") is None:
            self.redis_connw.set("Q_MAX_AGE", 365)

        # PASSWD_POLICY hash defaults
        if self.redis_connw and self.redis_connr.hget("PASSWD_POLICY", "length") is None:
            self.redis_connw.hset("PASSWD_POLICY", mapping={
                "length": 6,
                "chars": 0,
                "special_chars": 0,
                "lowerupper": 0,
                "numbers": 0
            })

        # DOMAIN_MAP
        print("Rebuilding DOMAIN_MAP from MySQL...")
        if self.redis_connw:
            self.redis_connw.delete("DOMAIN_MAP")
        domains = set()
        try:
            cursor = self.mysql_conn.cursor()

            cursor.execute("SELECT domain FROM domain")
            domains.update(row[0] for row in cursor.fetchall())
            cursor.execute("SELECT alias_domain FROM alias_domain")
            domains.update(row[0] for row in cursor.fetchall())

            cursor.close()

            if domains:
                for domain in domains:
                    if self.redis_connw:
                        self.redis_connw.hset("DOMAIN_MAP", domain, 1)
                print(f"{len(domains)} domains added to DOMAIN_MAP.")
            else:
                print("No domains found to insert into DOMAIN_MAP.")
        except Exception as e:
            print(f"Failed to rebuild DOMAIN_MAP: {e}")
    def setup_apikeys(self, api_allow_from, api_key_rw, api_key_ro):
        if not api_allow_from or api_allow_from == "invalid":
            return

        print("Validating API_ALLOW_FROM IPs...")
        ip_list = [ip.strip() for ip in api_allow_from.split(",")]
        validated_ips = []

        for ip in ip_list:
            try:
                ipaddress.ip_network(ip, strict=False)
                validated_ips.append(ip)
            except ValueError:
                continue
        if not validated_ips:
            print("No valid IPs found in API_ALLOW_FROM")
            return

        allow_from_str = ",".join(validated_ips)
        cursor = self.mysql_conn.cursor()
        try:
            if api_key_rw and api_key_rw != "invalid":
                print("Setting RW API key...")
                cursor.execute("DELETE FROM api WHERE access = 'rw'")
                cursor.execute(
                    "INSERT INTO api (api_key, active, allow_from, access) VALUES (%s, %s, %s, %s)",
                    (api_key_rw, 1, allow_from_str, "rw")
                )

            if api_key_ro and api_key_ro != "invalid":
                print("Setting RO API key...")
                cursor.execute("DELETE FROM api WHERE access = 'ro'")
                cursor.execute(
                    "INSERT INTO api (api_key, active, allow_from, access) VALUES (%s, %s, %s, %s)",
                    (api_key_ro, 1, allow_from_str, "ro")
                )

            self.mysql_conn.commit()
            print("API key(s) set successfully.")
        except Exception as e:
            print(f"Failed to configure API keys: {e}")
            self.mysql_conn.rollback()
        finally:
            cursor.close()
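    # Sketch: ipaddress.ip_network(..., strict=False) accepts both single
    # addresses and CIDR ranges, so a hypothetical
    # API_ALLOW_FROM="172.22.1.1,10.0.0.0/8,bogus" keeps the first two
    # entries and silently drops "bogus".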
    def setup_mysql_events(self):
        print("Creating scheduled MySQL EVENTS...")

        queries = [
            "DROP EVENT IF EXISTS clean_spamalias;",
            """
            CREATE EVENT clean_spamalias
            ON SCHEDULE EVERY 1 DAY
            DO
                DELETE FROM spamalias WHERE validity < UNIX_TIMESTAMP();
            """,
            "DROP EVENT IF EXISTS clean_oauth2;",
            """
            CREATE EVENT clean_oauth2
            ON SCHEDULE EVERY 1 DAY
            DO
            BEGIN
                DELETE FROM oauth_refresh_tokens WHERE expires < NOW();
                DELETE FROM oauth_access_tokens WHERE expires < NOW();
                DELETE FROM oauth_authorization_codes WHERE expires < NOW();
            END;
            """,
            "DROP EVENT IF EXISTS clean_sasl_log;",
            """
            CREATE EVENT clean_sasl_log
            ON SCHEDULE EVERY 1 DAY
            DO
            BEGIN
                DELETE sasl_log.* FROM sasl_log
                LEFT JOIN (
                    SELECT username, service, MAX(datetime) AS lastdate
                    FROM sasl_log
                    GROUP BY username, service
                ) AS last
                ON sasl_log.username = last.username AND sasl_log.service = last.service
                WHERE datetime < DATE_SUB(NOW(), INTERVAL 31 DAY)
                AND datetime < lastdate;

                DELETE FROM sasl_log
                WHERE username NOT IN (SELECT username FROM mailbox)
                AND datetime < DATE_SUB(NOW(), INTERVAL 31 DAY);
            END;
            """
        ]

        try:
            cursor = self.mysql_conn.cursor()
            for query in queries:
                cursor.execute(query)
            self.mysql_conn.commit()
            cursor.close()
            print("MySQL EVENTS created successfully.")
        except Exception as e:
            print(f"Failed to create MySQL EVENTS: {e}")
            self.mysql_conn.rollback()
83
data/Dockerfiles/bootstrap/modules/BootstrapPostfix.py
Normal file
@@ -0,0 +1,83 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
from pathlib import Path

class BootstrapPostfix(BootstrapBase):
    def bootstrap(self):
        # Connect to MySQL
        self.connect_mysql()

        # Wait for DNS
        self.wait_for_dns("mailcow.email")

        self.create_dir("/opt/postfix/conf/sql/")

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
            "VALID_CERT_DIRS": self.get_valid_cert_dirs()
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Set Syslog redis")
        self.set_syslog_redis()

        print("Render config")
        self.render_config("/service_config")

        # Create aliases DB
        self.run_command(["newaliases"])

        # Create SNI Config
        self.run_command(["postmap", "-F", "hash:/opt/postfix/conf/sni.map"])

        # Fix Postfix permissions
        self.set_owner("/opt/postfix/conf/sql", user="root", group="postfix", recursive=True)
        self.set_owner("/opt/postfix/conf/custom_transport.pcre", user="root", group="postfix")
        for cf_file in Path("/opt/postfix/conf/sql").glob("*.cf"):
            self.set_permissions(cf_file, 0o640)
        self.set_permissions("/opt/postfix/conf/custom_transport.pcre", 0o640)
        self.set_owner("/var/spool/postfix/public", user="root", group="postdrop", recursive=True)
        self.set_owner("/var/spool/postfix/maildrop", user="root", group="postdrop", recursive=True)
        self.run_command(["postfix", "set-permissions"], check=False)

        # Check for a leftover PID file from a crashed Postfix container before starting a new one
        pid_file = Path("/var/spool/postfix/pid/master.pid")
        if pid_file.exists():
            print(f"Removing stale Postfix PID file: {pid_file}")
            pid_file.unlink()
    def get_valid_cert_dirs(self):
        certs = {}
        base_path = Path("/etc/ssl/mail")
        if not base_path.exists():
            return certs

        for cert_dir in base_path.iterdir():
            if not cert_dir.is_dir():
                continue

            domains_file = cert_dir / "domains"
            cert_file = cert_dir / "cert.pem"
            key_file = cert_dir / "key.pem"

            if not (domains_file.exists() and cert_file.exists() and key_file.exists()):
                continue

            with open(domains_file, "r") as f:
                domains = [line.strip() for line in f if line.strip()]
            if domains:
                certs[str(cert_dir)] = domains

        return certs
132
data/Dockerfiles/bootstrap/modules/BootstrapRspamd.py
Normal file
@@ -0,0 +1,132 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
from pathlib import Path
import time
import platform

class BootstrapRspamd(BootstrapBase):
    def bootstrap(self):
        # Connect to MySQL
        self.connect_mysql()

        # Connect to Redis
        self.connect_redis()

        # Get Dovecot IPs
        dovecot_v4 = []
        dovecot_v6 = []
        while not dovecot_v4 and not dovecot_v6:
            try:
                dovecot_v4 = self.resolve_docker_dns_record("dovecot-mailcow", "A")
                dovecot_v6 = self.resolve_docker_dns_record("dovecot-mailcow", "AAAA")
            except Exception as e:
                print(e)
            if not dovecot_v4 and not dovecot_v6:
                print("Waiting for Dovecot IPs...")
                time.sleep(3)

        # Get Rspamd IPs
        rspamd_v4 = []
        rspamd_v6 = []
        while not rspamd_v4 and not rspamd_v6:
            try:
                rspamd_v4 = self.resolve_docker_dns_record("rspamd-mailcow", "A")
                rspamd_v6 = self.resolve_docker_dns_record("rspamd-mailcow", "AAAA")
            except Exception as e:
                print(e)
            if not rspamd_v4 and not rspamd_v6:
                print("Waiting for Rspamd IPs...")
                time.sleep(3)

        # Wait for services
        services = [
            ["php-fpm-mailcow", 9001],
            ["php-fpm-mailcow", 9002]
        ]
        for service in services:
            while not self.is_port_open(service[0], service[1]):
                print(f"Waiting for {service[0]} on port {service[1]}...")
                time.sleep(1)
            print(f"Service {service[0]} on port {service[1]} is ready!")

        for dir_path in ["/etc/rspamd/plugins.d", "/etc/rspamd/custom"]:
            Path(dir_path).mkdir(parents=True, exist_ok=True)
        for file_path in ["/etc/rspamd/rspamd.conf.local", "/etc/rspamd/rspamd.conf.override"]:
            Path(file_path).touch(exist_ok=True)
        self.set_permissions("/var/lib/rspamd", 0o755)

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        # Use an empty string when a record type did not resolve
        # (e.g. no AAAA records on IPv4-only networks)
        extra_vars = {
            "DOVECOT_V4": dovecot_v4[0] if dovecot_v4 else "",
            "DOVECOT_V6": dovecot_v6[0] if dovecot_v6 else "",
            "RSPAMD_V4": rspamd_v4[0] if rspamd_v4 else "",
            "RSPAMD_V6": rspamd_v6[0] if rspamd_v6 else "",
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Render config")
        self.render_config("/service_config")

        # Fix missing default global maps, if any.
        # These exist in the mailcow UI and should not be removed.
        files = [
            "/etc/rspamd/custom/global_mime_from_blacklist.map",
            "/etc/rspamd/custom/global_rcpt_blacklist.map",
            "/etc/rspamd/custom/global_smtp_from_blacklist.map",
            "/etc/rspamd/custom/global_mime_from_whitelist.map",
            "/etc/rspamd/custom/global_rcpt_whitelist.map",
            "/etc/rspamd/custom/global_smtp_from_whitelist.map",
            "/etc/rspamd/custom/bad_languages.map",
            "/etc/rspamd/custom/sa-rules",
            "/etc/rspamd/custom/dovecot_trusted.map",
            "/etc/rspamd/custom/rspamd_trusted.map",
            "/etc/rspamd/custom/mailcow_networks.map",
            "/etc/rspamd/custom/ip_wl.map",
            "/etc/rspamd/custom/fishy_tlds.map",
            "/etc/rspamd/custom/bad_words.map",
            "/etc/rspamd/custom/bad_asn.map",
            "/etc/rspamd/custom/bad_words_de.map",
            "/etc/rspamd/custom/bulk_header.map",
            "/etc/rspamd/custom/bad_header.map"
        ]
        for file in files:
            path = Path(file)
            path.parent.mkdir(parents=True, exist_ok=True)
            path.touch(exist_ok=True)

        # Fix permissions
        paths_rspamd = [
            "/var/lib/rspamd",
            "/etc/rspamd/local.d",
            "/etc/rspamd/override.d",
            "/etc/rspamd/rspamd.conf.local",
            "/etc/rspamd/rspamd.conf.override",
            "/etc/rspamd/plugins.d"
        ]
        for path in paths_rspamd:
            self.set_owner(path, "_rspamd", "_rspamd", recursive=True)
        self.set_owner("/etc/rspamd/custom", "_rspamd", "_rspamd")
        self.set_permissions("/etc/rspamd/custom", 0o755)

        custom_path = Path("/etc/rspamd/custom")
        for child in custom_path.iterdir():
            if child.is_file():
                self.set_owner(child, 82, 82)
                self.set_permissions(child, 0o644)

        # Provide additional Lua modules
        arch = platform.machine()
        self.run_command(["ln", "-s", f"/usr/lib/{arch}-linux-gnu/liblua5.1-cjson.so.0.0.0", "/usr/lib/rspamd/cjson.so"], check=False)
138
data/Dockerfiles/bootstrap/modules/BootstrapSogo.py
Normal file
@@ -0,0 +1,138 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
from pathlib import Path
import os
import sys
import time

class BootstrapSogo(BootstrapBase):
    def bootstrap(self):
        # Skip SOGo if set
        if self.isYes(os.getenv("SKIP_SOGO", "")):
            print("SKIP_SOGO is set, skipping SOGo startup...")
            time.sleep(365 * 24 * 60 * 60)
            sys.exit(1)

        # Connect to MySQL
        self.connect_mysql()

        # Wait until port is free
        while self.is_port_open(os.getenv("SOGO_HOST"), 20000):
            print("Port 20000 still in use, terminating sogod...")
            self.kill_proc("sogod")
            time.sleep(3)

        # Wait for schema to update to expected version
        self.wait_for_schema_update(init_file_path="init_db.inc.php")

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
            "SQL_DOMAINS": self.get_domains(),
            "IAM_SETTINGS": self.get_identity_provider_settings()
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Set Syslog redis")
        self.set_syslog_redis()

        print("Render config")
        self.render_config("/service_config")

        print("Fix permissions")
        self.set_owner("/var/lib/sogo", "sogo", "sogo", recursive=True)
        self.set_permissions("/var/lib/sogo/GNUstep/Defaults/sogod.plist", 0o600)

        # Rename custom logo
        logo_src = Path("/etc/sogo/sogo-full.svg")
        if logo_src.exists():
            print("Set Logo")
            self.move_file(logo_src, "/etc/sogo/custom-fulllogo.svg")

        # Rsync web content
        print("Syncing web content")
        self.rsync_file("/usr/lib/GNUstep/SOGo/", "/sogo_web/", recursive=True)

        # Chown backup path
        self.set_owner("/sogo_backup", "sogo", "sogo", recursive=True)
def get_domains(self):
|
||||
"""
|
||||
Retrieves a list of domains and their GAL (Global Address List) status.
|
||||
|
||||
Executes a SQL query to select:
|
||||
- `domain`
|
||||
- a human-readable GAL status ("YES" or "NO")
|
||||
- `ldap_gal` as a boolean (True/False)
|
||||
|
||||
Returns:
|
||||
list[dict]: A list of dicts with keys: domain, gal_status, ldap_gal.
|
||||
Example: [{"domain": "example.com", "gal_status": "YES", "ldap_gal": True}]
|
||||
|
||||
Logs:
|
||||
Error messages if the query fails.
|
||||
"""
|
||||
|
||||
query = """
|
||||
SELECT domain,
|
||||
CASE gal WHEN '1' THEN 'YES' ELSE 'NO' END AS gal_status,
|
||||
ldap_gal = 1 AS ldap_gal
|
||||
FROM domain;
|
||||
"""
|
||||
try:
|
||||
cursor = self.mysql_conn.cursor()
|
||||
cursor.execute(query)
|
||||
result = cursor.fetchall()
|
||||
cursor.close()
|
||||
|
||||
return [
|
||||
{
|
||||
"domain": row[0],
|
||||
"gal_status": row[1],
|
||||
"ldap_gal": bool(row[2])
|
||||
}
|
||||
for row in result
|
||||
]
|
||||
except Exception as e:
|
||||
print(f"Error fetching domains: {e}")
|
||||
return []
|
||||
|
||||
def get_identity_provider_settings(self):
|
||||
"""
|
||||
Retrieves all key-value identity provider settings.
|
||||
|
||||
Returns:
|
||||
dict: Settings in the format { key: value }
|
||||
|
||||
Logs:
|
||||
Error messages if the query fails.
|
||||
"""
|
||||
query = "SELECT `key`, `value` FROM identity_provider;"
|
||||
try:
|
||||
cursor = self.mysql_conn.cursor()
|
||||
cursor.execute(query)
|
||||
result = cursor.fetchall()
|
||||
cursor.close()
|
||||
|
||||
iam_settings = {row[0]: row[1] for row in result}
|
||||
|
||||
if iam_settings['authsource'] == "ldap":
|
||||
protocol = "ldaps" if iam_settings.get("use_ssl") else "ldap"
|
||||
starttls = "/????!StartTLS" if iam_settings.get("use_tls") else ""
|
||||
iam_settings['ldap_url'] = f"{protocol}://{iam_settings['host']}:{iam_settings['port']}{starttls}"
|
||||
|
||||
return iam_settings
|
||||
except Exception as e:
|
||||
print(f"Error fetching identity provider settings: {e}")
|
||||
return {}
|
||||
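get_domains() and get_identity_provider_settings() only shape SQL rows into plain Python structures; the actual rendering happens in render_config() through the Jinja2 environment set up in bootstrap(). A small self-contained sketch of how the SQL_DOMAINS rows drive a template (the template text here is illustrative, not the shipped SOGo template):

    from jinja2 import Template

    # Rows in the shape get_domains() returns
    sql_domains = [{"domain": "example.com", "gal_status": "YES", "ldap_gal": False}]

    tpl = Template("{% for d in SQL_DOMAINS %}{{ d.domain }}: GAL={{ d.gal_status }}\n{% endfor %}")
    print(tpl.render(SQL_DOMAINS=sql_domains))  # -> example.com: GAL=YES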
0
data/Dockerfiles/bootstrap/modules/__init__.py
Normal file
data/Dockerfiles/clamd/Dockerfile
@@ -1,25 +1,129 @@
FROM alpine:3.19
FROM alpine:3.21 AS builder

LABEL maintainer "The Infrastructure Company GmbH GmbH <info@servercow.de>"
WORKDIR /src
ENV CLAMD_VERSION=1.4.2

RUN apk upgrade --no-cache \
  && apk add --update --no-cache \
    rsync \
    clamav \
    bind-tools \
    bash \
    tini
    g++ \
    gcc \
    gdb \
    make \
    cmake \
    py3-pytest \
    python3 \
    valgrind \
    bzip2-dev \
    check-dev \
    curl-dev \
    json-c-dev \
    libmilter-dev \
    libxml2-dev \
    linux-headers \
    ncurses-dev \
    openssl-dev \
    pcre2-dev \
    zlib-dev \
    cargo \
    rust

# init
COPY clamd.sh /clamd.sh
RUN chmod +x /sbin/tini
RUN wget -P /src https://www.clamav.net/downloads/production/clamav-${CLAMD_VERSION}.tar.gz \
  && tar xzfv /src/clamav-${CLAMD_VERSION}.tar.gz \
  && cd /src/clamav-${CLAMD_VERSION} \
  && cmake . \
    -D CMAKE_BUILD_TYPE="Release" \
    -D CMAKE_INSTALL_PREFIX="/usr" \
    -D CMAKE_INSTALL_LIBDIR="/usr/lib" \
    -D APP_CONFIG_DIRECTORY="/etc/clamav" \
    -D DATABASE_DIRECTORY="/var/lib/clamav" \
    -D ENABLE_CLAMONACC=OFF \
    -D ENABLE_EXAMPLES=OFF \
    -D ENABLE_MILTER=ON \
    -D ENABLE_MAN_PAGES=OFF \
    -D ENABLE_STATIC_LIB=OFF \
    -D ENABLE_JSON_SHARED=ON \
  && cmake --build . \
  && make DESTDIR="/clamav" -j$(($(nproc) - 1)) install \
  && rm -r "/clamav/usr/lib/pkgconfig/" \
  && sed -e "s|^\(Example\)|\# \1|" \
    -e "s|.*\(LocalSocket\) .*|\1 /tmp/clamd.sock|" \
    -e "s|.*\(TCPSocket\) .*|\1 3310|" \
    -e "s|.*\(TCPAddr\) .*|#\1 0.0.0.0|" \
    -e "s|.*\(User\) .*|\1 clamav|" \
    -e "s|^\#\(LogFile\) .*|\1 /var/log/clamav/clamd.log|" \
    -e "s|^\#\(LogTime\).*|\1 yes|" \
    "/clamav/etc/clamav/clamd.conf.sample" > "/clamav/etc/clamav/clamd.conf" \
  && sed -e "s|^\(Example\)|\# \1|" \
    -e "s|.*\(DatabaseOwner\) .*|\1 clamav|" \
    -e "s|^\#\(UpdateLogFile\) .*|\1 /var/log/clamav/freshclam.log|" \
    -e "s|^\#\(NotifyClamd\).*|\1 /etc/clamav/clamd.conf|" \
    -e "s|^\#\(ScriptedUpdates\).*|\1 yes|" \
    "/clamav/etc/clamav/freshclam.conf.sample" > "/clamav/etc/clamav/freshclam.conf" \
  && sed -e "s|^\(Example\)|\# \1|" \
    -e "s|.*\(MilterSocket\) .*|\1 inet:7357|" \
    -e "s|.*\(User\) .*|\1 clamav|" \
    -e "s|^\#\(LogFile\) .*|\1 /var/log/clamav/milter.log|" \
    -e "s|^\#\(LogTime\).*|\1 yes|" \
    -e "s|.*\(\ClamdSocket\) .*|\1 unix:/tmp/clamd.sock|" \
    "/clamav/etc/clamav/clamav-milter.conf.sample" > "/clamav/etc/clamav/clamav-milter.conf" || exit 1

# healthcheck
COPY healthcheck.sh /healthcheck.sh
COPY clamdcheck.sh /usr/local/bin
RUN chmod +x /healthcheck.sh
RUN chmod +x /usr/local/bin/clamdcheck.sh

FROM alpine:3.21

LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"

RUN apk upgrade --no-cache \
  && apk add --update --no-cache \
    tzdata \
    rsync \
    bind-tools \
    bash \
    tini \
    json-c \
    libbz2 \
    libcurl \
    libmilter \
    libxml2 \
    ncurses-libs \
    pcre2 \
    zlib \
    libgcc \
    py3-pip \
  && addgroup -S "clamav" && \
    adduser -D -G "clamav" -h "/var/lib/clamav" -s "/bin/false" -S "clamav" && \
    install -d -m 755 -g "clamav" -o "clamav" "/var/log/clamav" && \
    chown -R clamav:clamav /var/lib/clamav

RUN apk add --no-cache --virtual .build-deps \
    gcc \
    musl-dev \
    python3-dev \
    linux-headers \
  && pip install --break-system-packages psutil \
  && apk del .build-deps

RUN pip install --break-system-packages \
    mysql-connector-python \
    jinja2 \
    redis \
    dnspython


COPY --from=builder "/clamav" "/"


COPY data/Dockerfiles/bootstrap /bootstrap
COPY data/Dockerfiles/clamd/docker-entrypoint.sh /docker-entrypoint.sh
COPY data/Dockerfiles/clamd/clamd.sh /clamd.sh
COPY data/Dockerfiles/clamd/healthcheck.sh /healthcheck.sh
COPY data/Dockerfiles/clamd/clamdcheck.sh /usr/local/bin
HEALTHCHECK --start-period=6m CMD "/healthcheck.sh"

ENTRYPOINT []
RUN chmod +x /docker-entrypoint.sh \
  /clamd.sh \
  /healthcheck.sh \
  /usr/local/bin/clamdcheck.sh \
  /sbin/tini

ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["/sbin/tini", "-g", "--", "/clamd.sh"]
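The builder stage above derives clamd.conf, freshclam.conf and clamav-milter.conf from the upstream .sample files with chained sed expressions. The same rewrite, sketched in Python for readability (regexes simplified to the clamd.conf case; a sketch, not part of the image):

    import re

    def render_clamd_conf(sample_text: str) -> str:
        # Same intent as the chained sed expressions: comment out the "Example"
        # guard and pin socket/user settings.
        rules = [
            (r"^(Example)", r"# \1"),
            (r"^.*(LocalSocket) .*$", r"\1 /tmp/clamd.sock"),
            (r"^.*(TCPSocket) .*$", r"\1 3310"),
            (r"^.*(User) .*$", r"\1 clamav"),
        ]
        out = sample_text
        for pattern, repl in rules:
            out = re.sub(pattern, repl, out, flags=re.MULTILINE)
        return out

    # Usage: render_clamd_conf(open("clamd.conf.sample").read())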
@@ -1,48 +1,5 @@
#!/bin/bash

if [[ "${SKIP_CLAMD}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  echo "SKIP_CLAMD=y, skipping ClamAV..."
  sleep 365d
  exit 0
fi

# Cleaning up garbage
echo "Cleaning up tmp files..."
rm -rf /var/lib/clamav/clamav-*.tmp

# Prepare whitelist

mkdir -p /run/clamav /var/lib/clamav

if [[ -s /etc/clamav/whitelist.ign2 ]]; then
  echo "Copying non-empty whitelist.ign2 to /var/lib/clamav/whitelist.ign2"
  cp /etc/clamav/whitelist.ign2 /var/lib/clamav/whitelist.ign2
fi

if [[ ! -f /var/lib/clamav/whitelist.ign2 ]]; then
  echo "Creating /var/lib/clamav/whitelist.ign2"
  cat <<EOF > /var/lib/clamav/whitelist.ign2
# Please restart ClamAV after changing signatures
Example-Signature.Ignore-1
PUA.Win.Trojan.EmbeddedPDF-1
PUA.Pdf.Trojan.EmbeddedJavaScript-1
PUA.Pdf.Trojan.OpenActionObjectwithJavascript-1
EOF
fi

chown clamav:clamav -R /var/lib/clamav /run/clamav

chmod 755 /var/lib/clamav
chmod 644 -R /var/lib/clamav/*
chmod 750 /run/clamav

stat /var/lib/clamav/whitelist.ign2
dos2unix /var/lib/clamav/whitelist.ign2
sed -i '/^\s*$/d' /var/lib/clamav/whitelist.ign2
# Copying to /etc/clamav to expose file as-is to administrator
cp -p /var/lib/clamav/whitelist.ign2 /etc/clamav/whitelist.ign2


BACKGROUND_TASKS=()

echo "Running freshclam..."
@@ -91,6 +48,7 @@ done
) &
BACKGROUND_TASKS+=($!)

echo "$(clamd -V) is starting... please wait a moment."
nice -n10 clamd &
BACKGROUND_TASKS+=($!)


@@ -11,4 +11,4 @@ if [ "${CLAMAV_NO_CLAMD:-}" != "false" ]; then
  echo "Clamd is up"
fi

exit 0
exit 0

20
data/Dockerfiles/clamd/docker-entrypoint.sh
Normal file
@@ -0,0 +1,20 @@
#!/bin/bash

# Run hooks
for file in /hooks/*; do
  if [ -x "${file}" ]; then
    echo "Running hook ${file}"
    "${file}"
  fi
done

python3 -u /bootstrap/main.py
BOOTSTRAP_EXIT_CODE=$?

if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
  echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting Clamd."
  exit $BOOTSTRAP_EXIT_CODE
fi

echo "Bootstrap succeeded. Starting Clamd..."
exec "$@"
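For readers less at home in shell, the gate logic above reads naturally as Python too; a behavior-equivalent sketch (hypothetical, the container ships the bash version above):

    import os
    import subprocess
    import sys

    def main():
        # Run executable hooks in /hooks, mirroring the shell loop
        hooks_dir = "/hooks"
        if os.path.isdir(hooks_dir):
            for name in sorted(os.listdir(hooks_dir)):
                hook = os.path.join(hooks_dir, name)
                if os.access(hook, os.X_OK):
                    print(f"Running hook {hook}")
                    subprocess.run([hook], check=False)

        rc = subprocess.run([sys.executable, "-u", "/bootstrap/main.py"]).returncode
        if rc != 0:
            print(f"Bootstrap failed with exit code {rc}. Not starting Clamd.")
            sys.exit(rc)

        print("Bootstrap succeeded. Starting Clamd...")
        os.execvp(sys.argv[1], sys.argv[1:])  # replace the process, like `exec "$@"`

    if __name__ == "__main__":
        main()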
@@ -1,6 +1,6 @@
FROM alpine:3.19
FROM alpine:3.21

LABEL maintainer "The Infrastructure Company GmbH GmbH <info@servercow.de>"
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"

ARG PIP_BREAK_SYSTEM_PACKAGES=1
WORKDIR /app
@@ -19,9 +19,9 @@ RUN apk add --update --no-cache python3 \
  docker
RUN mkdir /app/modules

COPY docker-entrypoint.sh /app/
COPY main.py /app/main.py
COPY modules/ /app/modules/
COPY data/Dockerfiles/dockerapi/docker-entrypoint.sh /app/
COPY data/Dockerfiles/dockerapi/main.py /app/main.py
COPY data/Dockerfiles/dockerapi/modules/ /app/modules/

ENTRYPOINT ["/bin/sh", "/app/docker-entrypoint.sh"]
CMD exec python main.py
CMD ["python", "main.py"]
@@ -34,9 +34,9 @@ async def lifespan(app: FastAPI):

    # Init redis client
    if os.environ['REDIS_SLAVEOF_IP'] != "":
        redis_client = redis = await aioredis.from_url(f"redis://{os.environ['REDIS_SLAVEOF_IP']}:{os.environ['REDIS_SLAVEOF_PORT']}/0")
        redis_client = redis = await aioredis.from_url(f"redis://{os.environ['REDIS_SLAVEOF_IP']}:{os.environ['REDIS_SLAVEOF_PORT']}/0", password=os.environ['REDISPASS'])
    else:
        redis_client = redis = await aioredis.from_url("redis://redis-mailcow:6379/0")
        redis_client = redis = await aioredis.from_url(f"redis://{os.environ['REDIS_HOST']}:6379/0", password=os.environ['REDISPASS'])

    # Init docker clients
    sync_docker_client = docker.DockerClient(base_url='unix://var/run/docker.sock', version='auto')
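Both branches now authenticate with REDISPASS and the hardcoded hostname gives way to REDIS_HOST. A distilled version of that branch, isolated for testing (a sketch; shown with redis.asyncio, the module the standalone aioredis package was merged into, where from_url constructs the client without await):

    import os
    import redis.asyncio as aioredis

    def make_redis_client():
        # REDISPASS is mandatory in both branches after this change
        password = os.environ["REDISPASS"]
        if os.environ.get("REDIS_SLAVEOF_IP"):
            url = f"redis://{os.environ['REDIS_SLAVEOF_IP']}:{os.environ['REDIS_SLAVEOF_PORT']}/0"
        else:
            url = f"redis://{os.environ['REDIS_HOST']}:6379/0"
        return aioredis.from_url(url, password=password)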
@@ -90,7 +90,7 @@ async def get_container(container_id : str):
        if container._id == container_id:
            container_info = await container.show()
            return Response(content=json.dumps(container_info, indent=4), media_type="application/json")


    res = {
        "type": "danger",
        "msg": "no container found"
@@ -130,7 +130,7 @@ async def get_containers():
async def post_containers(container_id : str, post_action : str, request: Request):
    global dockerapi

    try :
    try:
        request_json = await request.json()
    except Exception as err:
        request_json = {}
@@ -191,7 +191,7 @@ async def post_container_update_stats(container_id : str):

    stats = json.loads(await dockerapi.redis_client.get(container_id + '_stats'))
    return Response(content=json.dumps(stats, indent=4), media_type="application/json")



# PubSub Handler
async def handle_pubsub_messages(channel: aioredis.client.PubSub):
@@ -241,10 +241,10 @@ async def handle_pubsub_messages(channel: aioredis.client.PubSub):
                    else:
                        dockerapi.logger.error("api call: missing container_name, post_action or request")
                else:
                    dockerapi.logger.error("Unknwon PubSub recieved - %s" % json.dumps(data_json))
                    dockerapi.logger.error("Unknown PubSub received - %s" % json.dumps(data_json))
            else:
                dockerapi.logger.error("Unknwon PubSub recieved - %s" % json.dumps(data_json))

                dockerapi.logger.error("Unknown PubSub received - %s" % json.dumps(data_json))

            await asyncio.sleep(0.0)
        except asyncio.TimeoutError:
            pass

@@ -159,7 +159,7 @@ class DockerApi:
            postqueue_r = container.exec_run(["/bin/bash", "-c", "/usr/sbin/postqueue " + i], user='postfix')
            # todo: check each exit code
        res = { 'type': 'success', 'msg': 'Scheduled immediate delivery'}
        return Response(content=json.dumps(res, indent=4), media_type="application/json")
        return Response(content=json.dumps(res, indent=4), media_type="application/json")
    # api call: container_post - post_action: exec - cmd: mailq - task: list
    def container_post__exec__mailq__list(self, request_json, **kwargs):
        if 'container_id' in kwargs:
@@ -318,7 +318,7 @@ class DockerApi:

        if 'username' in request_json and 'script_name' in request_json:
            for container in self.sync_docker_client.containers.list(filters=filters):
                cmd = ["/bin/bash", "-c", "/usr/bin/doveadm sieve get -u '" + request_json['username'].replace("'", "'\\''") + "' '" + request_json['script_name'].replace("'", "'\\''") + "'"]
                cmd = ["/bin/bash", "-c", "/usr/bin/doveadm sieve get -u '" + request_json['username'].replace("'", "'\\''") + "' '" + request_json['script_name'].replace("'", "'\\''") + "'"]
                sieve_return = container.exec_run(cmd)
                return self.exec_run_handler('utf8_text_only', sieve_return)
    # api call: container_post - post_action: exec - cmd: maildir - task: cleanup
@@ -342,6 +342,30 @@ class DockerApi:
                cmd = ["/bin/bash", "-c", cmd_vmail]
                maildir_cleanup = container.exec_run(cmd, user='vmail')
                return self.exec_run_handler('generic', maildir_cleanup)
    # api call: container_post - post_action: exec - cmd: maildir - task: move
    def container_post__exec__maildir__move(self, request_json, **kwargs):
        if 'container_id' in kwargs:
            filters = {"id": kwargs['container_id']}
        elif 'container_name' in kwargs:
            filters = {"name": kwargs['container_name']}

        if 'old_maildir' in request_json and 'new_maildir' in request_json:
            for container in self.sync_docker_client.containers.list(filters=filters):
                vmail_name = request_json['old_maildir'].replace("'", "'\\''")
                new_vmail_name = request_json['new_maildir'].replace("'", "'\\''")
                cmd_vmail = f"if [[ -d '/var/vmail/{vmail_name}' ]]; then /bin/mv '/var/vmail/{vmail_name}' '/var/vmail/{new_vmail_name}'; fi"

                index_name = request_json['old_maildir'].split("/")
                new_index_name = request_json['new_maildir'].split("/")
                if len(index_name) > 1 and len(new_index_name) > 1:
                    index_name = index_name[1].replace("'", "'\\''") + "@" + index_name[0].replace("'", "'\\''")
                    new_index_name = new_index_name[1].replace("'", "'\\''") + "@" + new_index_name[0].replace("'", "'\\''")
                    cmd_vmail_index = f"if [[ -d '/var/vmail_index/{index_name}' ]]; then /bin/mv '/var/vmail_index/{index_name}' '/var/vmail_index/{new_index_name}_index'; fi"
                    cmd = ["/bin/bash", "-c", cmd_vmail + " && " + cmd_vmail_index]
                else:
                    cmd = ["/bin/bash", "-c", cmd_vmail]
                maildir_move = container.exec_run(cmd, user='vmail')
                return self.exec_run_handler('generic', maildir_move)
    # api call: container_post - post_action: exec - cmd: rspamd - task: worker_password
    def container_post__exec__rspamd__worker_password(self, request_json, **kwargs):
        if 'container_id' in kwargs:
@@ -358,8 +382,8 @@ class DockerApi:
            for line in cmd_response.split("\n"):
                if '$2$' in line:
                    hash = line.strip()
                    hash_out = re.search('\$2\$.+$', hash).group(0)
                    rspamd_passphrase_hash = re.sub('[^0-9a-zA-Z\$]+', '', hash_out.rstrip())
                    hash_out = re.search(r'\$2\$.+$', hash).group(0)
                    rspamd_passphrase_hash = re.sub(r'[^0-9a-zA-Z\$]+', '', hash_out.rstrip())
                    rspamd_password_filename = "/etc/rspamd/override.d/worker-controller-password.inc"
                    cmd = '''/bin/echo 'enable_password = "%s";' > %s && cat %s''' % (rspamd_passphrase_hash, rspamd_password_filename, rspamd_password_filename)
                    cmd_response = self.exec_cmd_container(container, cmd, user="_rspamd")
@@ -374,6 +398,121 @@ class DockerApi:
            self.logger.error('failed changing Rspamd password')
            res = { 'type': 'danger', 'msg': 'command did not complete' }
        return Response(content=json.dumps(res, indent=4), media_type="application/json")
    # api call: container_post - post_action: exec - cmd: sogo - task: rename
    def container_post__exec__sogo__rename_user(self, request_json, **kwargs):
        if 'container_id' in kwargs:
            filters = {"id": kwargs['container_id']}
        elif 'container_name' in kwargs:
            filters = {"name": kwargs['container_name']}

        if 'old_username' in request_json and 'new_username' in request_json:
            for container in self.sync_docker_client.containers.list(filters=filters):
                old_username = request_json['old_username'].replace("'", "'\\''")
                new_username = request_json['new_username'].replace("'", "'\\''")

                sogo_return = container.exec_run(["/bin/bash", "-c", f"sogo-tool rename-user '{old_username}' '{new_username}'"], user='sogo')
                return self.exec_run_handler('generic', sogo_return)
    # api call: container_post - post_action: exec - cmd: doveadm - task: get_acl
    def container_post__exec__doveadm__get_acl(self, request_json, **kwargs):
        if 'container_id' in kwargs:
            filters = {"id": kwargs['container_id']}
        elif 'container_name' in kwargs:
            filters = {"name": kwargs['container_name']}

        for container in self.sync_docker_client.containers.list(filters=filters):
            id = request_json['id'].replace("'", "'\\''")

            shared_folders = container.exec_run(["/bin/bash", "-c", f"doveadm mailbox list -u '{id}'"])
            shared_folders = shared_folders.output.decode('utf-8')
            shared_folders = shared_folders.splitlines()

            formatted_acls = []
            mailbox_seen = []
            for shared_folder in shared_folders:
                if "Shared" not in shared_folder:
                    mailbox = shared_folder.replace("'", "'\\''")
                    if mailbox in mailbox_seen:
                        continue

                    acls = container.exec_run(["/bin/bash", "-c", f"doveadm acl get -u '{id}' '{mailbox}'"])
                    acls = acls.output.decode('utf-8').strip().splitlines()
                    if len(acls) >= 2:
                        for acl in acls[1:]:
                            user_id, rights = acl.split(maxsplit=1)
                            user_id = user_id.split('=')[1]
                            mailbox_seen.append(mailbox)
                            formatted_acls.append({ 'user': id, 'id': user_id, 'mailbox': mailbox, 'rights': rights.split() })
                elif "Shared" in shared_folder and "/" in shared_folder:
                    shared_folder = shared_folder.split("/")
                    if len(shared_folder) < 3:
                        continue

                    user = shared_folder[1].replace("'", "'\\''")
                    mailbox = '/'.join(shared_folder[2:]).replace("'", "'\\''")
                    if mailbox in mailbox_seen:
                        continue

                    acls = container.exec_run(["/bin/bash", "-c", f"doveadm acl get -u '{user}' '{mailbox}'"])
                    acls = acls.output.decode('utf-8').strip().splitlines()
                    if len(acls) >= 2:
                        for acl in acls[1:]:
                            user_id, rights = acl.split(maxsplit=1)
                            user_id = user_id.split('=')[1].replace("'", "'\\''")
                            if user_id == id and mailbox not in mailbox_seen:
                                mailbox_seen.append(mailbox)
                                formatted_acls.append({ 'user': user, 'id': id, 'mailbox': mailbox, 'rights': rights.split() })

            return Response(content=json.dumps(formatted_acls, indent=4), media_type="application/json")
    # api call: container_post - post_action: exec - cmd: doveadm - task: delete_acl
    def container_post__exec__doveadm__delete_acl(self, request_json, **kwargs):
        if 'container_id' in kwargs:
            filters = {"id": kwargs['container_id']}
        elif 'container_name' in kwargs:
            filters = {"name": kwargs['container_name']}

        for container in self.sync_docker_client.containers.list(filters=filters):
            user = request_json['user'].replace("'", "'\\''")
            mailbox = request_json['mailbox'].replace("'", "'\\''")
            id = request_json['id'].replace("'", "'\\''")

            if user and mailbox and id:
                acl_delete_return = container.exec_run(["/bin/bash", "-c", f"doveadm acl delete -u '{user}' '{mailbox}' 'user={id}'"])
                return self.exec_run_handler('generic', acl_delete_return)
    # api call: container_post - post_action: exec - cmd: doveadm - task: set_acl
    def container_post__exec__doveadm__set_acl(self, request_json, **kwargs):
        if 'container_id' in kwargs:
            filters = {"id": kwargs['container_id']}
        elif 'container_name' in kwargs:
            filters = {"name": kwargs['container_name']}

        for container in self.sync_docker_client.containers.list(filters=filters):
            user = request_json['user'].replace("'", "'\\''")
            mailbox = request_json['mailbox'].replace("'", "'\\''")
            id = request_json['id'].replace("'", "'\\''")
            rights = ""

            available_rights = [
                "admin",
                "create",
                "delete",
                "expunge",
                "insert",
                "lookup",
                "post",
                "read",
                "write",
                "write-deleted",
                "write-seen"
            ]
            for right in request_json['rights']:
                right = right.replace("'", "'\\''").lower()
                if right in available_rights:
                    rights += right + " "

            if user and mailbox and id and rights:
                acl_set_return = container.exec_run(["/bin/bash", "-c", f"doveadm acl set -u '{user}' '{mailbox}' 'user={id}' {rights}"])
                return self.exec_run_handler('generic', acl_set_return)


    # Collect host stats
    async def get_host_stats(self, wait=5):
@@ -462,7 +601,7 @@ class DockerApi:
            except:
                pass
            return ''.join(total_data)


        try :
            socket = container.exec_run([shell_cmd], stdin=True, socket=True, user=user).output._sock
            if not cmd.endswith("\n"):
@@ -1,11 +1,12 @@
FROM alpine:3.19
LABEL maintainer "The Infrastructure Company GmbH GmbH <info@servercow.de>"
FROM alpine:3.21

LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"

# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=^(?<version>.*)$
ARG GOSU_VERSION=1.16

ENV LANG C.UTF-8
ENV LC_ALL C.UTF-8
ENV LANG=C.UTF-8
ENV LC_ALL=C.UTF-8

# Add groups and users before installing Dovecot to not break compatibility
RUN addgroup -g 5000 vmail \
@@ -24,6 +25,7 @@ RUN addgroup -g 5000 vmail \
    envsubst \
    ca-certificates \
    curl \
    coreutils \
    jq \
    lua \
    lua-cjson \
@@ -32,9 +34,13 @@ RUN addgroup -g 5000 vmail \
    lua5.3-sql-mysql \
    icu-data-full \
    mariadb-connector-c \
    lua-sec \
    mariadb-dev \
    glib-dev \
    gcompat \
    mariadb-client \
    perl \
    perl-dev \
    perl-ntlm \
    perl-cgi \
    perl-crypt-openssl-rsa \
@@ -62,8 +68,8 @@ RUN addgroup -g 5000 vmail \
    perl-package-stash-xs \
    perl-par-packer \
    perl-parse-recdescent \
    perl-lockfile-simple --repository=http://dl-cdn.alpinelinux.org/alpine/edge/community/ \
    libproc \
    perl-lockfile-simple \
    libproc2 \
    perl-readonly \
    perl-regexp-common \
    perl-sys-meminfo \
@@ -81,11 +87,11 @@ RUN addgroup -g 5000 vmail \
    perl-proc-processtable \
    perl-app-cpanminus \
    procps \
    python3 \
    py3-mysqlclient \
    python3 py3-pip python3-dev \
    py3-html2text \
    py3-jinja2 \
    py3-redis \
    linux-headers \
    musl-dev \
    gcc \
    redis \
    syslog-ng \
    syslog-ng-redis \
@@ -103,32 +109,42 @@ RUN addgroup -g 5000 vmail \
    dovecot-submissiond \
    dovecot-pigeonhole-plugin \
    dovecot-pop3d \
    dovecot-fts-solr \
    dovecot-fts-flatcurve \
  && arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) \
  && wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$arch" \
  && chmod +x /usr/local/bin/gosu \
  && gosu nobody true

# RUN cpan LockFile::Simple
RUN pip install --break-system-packages \
    mysql-connector-python \
    jinja2 \
    redis \
    dnspython \
    psutil

COPY trim_logs.sh /usr/local/bin/trim_logs.sh
COPY clean_q_aged.sh /usr/local/bin/clean_q_aged.sh
COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
COPY imapsync /usr/local/bin/imapsync
COPY imapsync_runner.pl /usr/local/bin/imapsync_runner.pl
COPY report-spam.sieve /usr/lib/dovecot/sieve/report-spam.sieve
COPY report-ham.sieve /usr/lib/dovecot/sieve/report-ham.sieve
COPY rspamd-pipe-ham /usr/lib/dovecot/sieve/rspamd-pipe-ham
COPY rspamd-pipe-spam /usr/lib/dovecot/sieve/rspamd-pipe-spam
COPY sa-rules.sh /usr/local/bin/sa-rules.sh
COPY maildir_gc.sh /usr/local/bin/maildir_gc.sh
COPY docker-entrypoint.sh /
COPY supervisord.conf /etc/supervisor/supervisord.conf
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
COPY quarantine_notify.py /usr/local/bin/quarantine_notify.py
COPY quota_notify.py /usr/local/bin/quota_notify.py
COPY repl_health.sh /usr/local/bin/repl_health.sh

ENTRYPOINT ["/docker-entrypoint.sh"]
CMD exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
COPY data/Dockerfiles/bootstrap /bootstrap
COPY data/Dockerfiles/dovecot/trim_logs.sh /usr/local/bin/trim_logs.sh
COPY data/Dockerfiles/dovecot/clean_q_aged.sh /usr/local/bin/clean_q_aged.sh
COPY data/Dockerfiles/dovecot/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
COPY data/Dockerfiles/dovecot/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
COPY data/Dockerfiles/dovecot/imapsync /usr/local/bin/imapsync
COPY data/Dockerfiles/dovecot/imapsync_runner.pl /usr/local/bin/imapsync_runner.pl
COPY data/Dockerfiles/dovecot/report-spam.sieve /usr/lib/dovecot/sieve/report-spam.sieve
COPY data/Dockerfiles/dovecot/report-ham.sieve /usr/lib/dovecot/sieve/report-ham.sieve
COPY data/Dockerfiles/dovecot/rspamd-pipe-ham /usr/lib/dovecot/sieve/rspamd-pipe-ham
COPY data/Dockerfiles/dovecot/rspamd-pipe-spam /usr/lib/dovecot/sieve/rspamd-pipe-spam
COPY data/Dockerfiles/dovecot/sa-rules.sh /usr/local/bin/sa-rules.sh
COPY data/Dockerfiles/dovecot/docker-entrypoint.sh /
COPY data/Dockerfiles/dovecot/supervisord.conf /etc/supervisor/supervisord.conf
COPY data/Dockerfiles/dovecot/stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
COPY data/Dockerfiles/dovecot/quarantine_notify.py /usr/local/bin/quarantine_notify.py
COPY data/Dockerfiles/dovecot/quota_notify.py /usr/local/bin/quota_notify.py
COPY data/Dockerfiles/dovecot/repl_health.sh /usr/local/bin/repl_health.sh
COPY data/Dockerfiles/dovecot/optimize-fts.sh /usr/local/bin/optimize-fts.sh

RUN chmod +x /docker-entrypoint.sh \
  /usr/local/sbin/stop-supervisor.sh


CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]

@@ -2,7 +2,7 @@

source /source_env.sh

MAX_AGE=$(redis-cli --raw -h redis-mailcow GET Q_MAX_AGE)
MAX_AGE=$(redis-cli --raw -h redis-mailcow -a ${REDISPASS} --no-auth-warning GET Q_MAX_AGE)

if [[ -z ${MAX_AGE} ]]; then
  echo "Max age for quarantine items not defined"
@@ -15,6 +15,6 @@ if ! [[ ${MAX_AGE} =~ ${NUM_REGEXP} ]] ; then
  exit 1
fi

TO_DELETE=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT COUNT(id) FROM quarantine WHERE created < NOW() - INTERVAL ${MAX_AGE//[!0-9]/} DAY" -BN)
mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "DELETE FROM quarantine WHERE created < NOW() - INTERVAL ${MAX_AGE//[!0-9]/} DAY"
TO_DELETE=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT COUNT(id) FROM quarantine WHERE created < NOW() - INTERVAL ${MAX_AGE//[!0-9]/} DAY" -BN)
mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "DELETE FROM quarantine WHERE created < NOW() - INTERVAL ${MAX_AGE//[!0-9]/} DAY"
echo "Deleted ${TO_DELETE} items from quarantine table (max age is ${MAX_AGE//[!0-9]/} days)"

@@ -1,424 +1,4 @@
#!/bin/bash
set -e

# Wait for MySQL to warm-up
while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
  echo "Waiting for database to come up..."
  sleep 2
done

until dig +short mailcow.email > /dev/null; do
  echo "Waiting for DNS..."
  sleep 1
done

# Do not attempt to write to slave
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
else
  REDIS_CMDLINE="redis-cli -h redis -p 6379"
fi

until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
  echo "Waiting for Redis..."
  sleep 2
done

${REDIS_CMDLINE} SET DOVECOT_REPL_HEALTH 1 > /dev/null

# Create missing directories
[[ ! -d /etc/dovecot/sql/ ]] && mkdir -p /etc/dovecot/sql/
[[ ! -d /etc/dovecot/lua/ ]] && mkdir -p /etc/dovecot/lua/
[[ ! -d /var/vmail/_garbage ]] && mkdir -p /var/vmail/_garbage
[[ ! -d /var/vmail/sieve ]] && mkdir -p /var/vmail/sieve
[[ ! -d /etc/sogo ]] && mkdir -p /etc/sogo
[[ ! -d /var/volatile ]] && mkdir -p /var/volatile

# Set Dovecot sql config parameters, escape " in db password
DBPASS=$(echo ${DBPASS} | sed 's/"/\\"/g')

# Create quota dict for Dovecot
if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  QUOTA_TABLE=quota2
else
  QUOTA_TABLE=quota2replica
fi
cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-quota.conf
# Autogenerated by mailcow
connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
map {
  pattern = priv/quota/storage
  table = ${QUOTA_TABLE}
  username_field = username
  value_field = bytes
}
map {
  pattern = priv/quota/messages
  table = ${QUOTA_TABLE}
  username_field = username
  value_field = messages
}
EOF

# Create dict used for sieve pre and postfilters
cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-sieve_before.conf
# Autogenerated by mailcow
connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
map {
  pattern = priv/sieve/name/\$script_name
  table = sieve_before
  username_field = username
  value_field = id
  fields {
    script_name = \$script_name
  }
}
map {
  pattern = priv/sieve/data/\$id
  table = sieve_before
  username_field = username
  value_field = script_data
  fields {
    id = \$id
  }
}
EOF

cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-sieve_after.conf
# Autogenerated by mailcow
connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
map {
  pattern = priv/sieve/name/\$script_name
  table = sieve_after
  username_field = username
  value_field = id
  fields {
    script_name = \$script_name
  }
}
map {
  pattern = priv/sieve/data/\$id
  table = sieve_after
  username_field = username
  value_field = script_data
  fields {
    id = \$id
  }
}
EOF

echo -n ${ACL_ANYONE} > /etc/dovecot/acl_anyone

if [[ "${SKIP_SOLR}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  echo -n 'quota acl zlib mail_crypt mail_crypt_acl mail_log notify listescape replication' > /etc/dovecot/mail_plugins
  echo -n 'quota imap_quota imap_acl acl zlib imap_zlib imap_sieve mail_crypt mail_crypt_acl notify listescape replication mail_log' > /etc/dovecot/mail_plugins_imap
  echo -n 'quota sieve acl zlib mail_crypt mail_crypt_acl notify listescape replication' > /etc/dovecot/mail_plugins_lmtp
else
  echo -n 'quota acl zlib mail_crypt mail_crypt_acl mail_log notify fts fts_solr listescape replication' > /etc/dovecot/mail_plugins
  echo -n 'quota imap_quota imap_acl acl zlib imap_zlib imap_sieve mail_crypt mail_crypt_acl notify mail_log fts fts_solr listescape replication' > /etc/dovecot/mail_plugins_imap
  echo -n 'quota sieve acl zlib mail_crypt mail_crypt_acl fts fts_solr notify listescape replication' > /etc/dovecot/mail_plugins_lmtp
fi
chmod 644 /etc/dovecot/mail_plugins /etc/dovecot/mail_plugins_imap /etc/dovecot/mail_plugins_lmtp /templates/quarantine.tpl

cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-userdb.conf
# Autogenerated by mailcow
driver = mysql
connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
user_query = SELECT CONCAT(JSON_UNQUOTE(JSON_VALUE(attributes, '$.mailbox_format')), mailbox_path_prefix, '%d/%n/${MAILDIR_SUB}:VOLATILEDIR=/var/volatile/%u:INDEX=/var/vmail_index/%u') AS mail, '%s' AS protocol, 5000 AS uid, 5000 AS gid, concat('*:bytes=', quota) AS quota_rule FROM mailbox WHERE username = '%u' AND (active = '1' OR active = '2')
iterate_query = SELECT username FROM mailbox WHERE active = '1' OR active = '2';
EOF

cat <<EOF > /etc/dovecot/lua/passwd-verify.lua
function auth_password_verify(req, pass)

  if req.domain == nil then
    return dovecot.auth.PASSDB_RESULT_USER_UNKNOWN, "No such user"
  end

  if cur == nil then
    script_init()
  end

  if req.user == nil then
    req.user = ''
  end

  respbody = {}

  -- check against mailbox passwds
  local cur,errorString = con:execute(string.format([[SELECT password FROM mailbox
    WHERE username = '%s'
      AND active = '1'
      AND domain IN (SELECT domain FROM domain WHERE domain='%s' AND active='1')
      AND IFNULL(JSON_UNQUOTE(JSON_VALUE(mailbox.attributes, '$.force_pw_update')), 0) != '1'
      AND IFNULL(JSON_UNQUOTE(JSON_VALUE(attributes, '$.%s_access')), 1) = '1']], con:escape(req.user), con:escape(req.domain), con:escape(req.service)))
  local row = cur:fetch ({}, "a")
  while row do
    if req.password_verify(req, row.password, pass) == 1 then
      con:execute(string.format([[REPLACE INTO sasl_log (service, app_password, username, real_rip)
        VALUES ("%s", 0, "%s", "%s")]], con:escape(req.service), con:escape(req.user), con:escape(req.real_rip)))
      cur:close()
      con:close()
      return dovecot.auth.PASSDB_RESULT_OK, ""
    end
    row = cur:fetch (row, "a")
  end

  -- check against app passwds for imap and smtp
  -- app passwords are only available for imap, smtp, sieve and pop3 when using sasl
  if req.service == "smtp" or req.service == "imap" or req.service == "sieve" or req.service == "pop3" then
    local cur,errorString = con:execute(string.format([[SELECT app_passwd.id, %s_access AS has_prot_access, app_passwd.password FROM app_passwd
      INNER JOIN mailbox ON mailbox.username = app_passwd.mailbox
      WHERE mailbox = '%s'
        AND app_passwd.active = '1'
        AND mailbox.active = '1'
        AND app_passwd.domain IN (SELECT domain FROM domain WHERE domain='%s' AND active='1')]], con:escape(req.service), con:escape(req.user), con:escape(req.domain)))
    local row = cur:fetch ({}, "a")
    while row do
      if req.password_verify(req, row.password, pass) == 1 then
        -- if password is valid and protocol access is 1 OR real_rip matches SOGo, proceed
        if tostring(req.real_rip) == "__IPV4_SOGO__" then
          cur:close()
          con:close()
          return dovecot.auth.PASSDB_RESULT_OK, ""
        elseif row.has_prot_access == "1" then
          con:execute(string.format([[REPLACE INTO sasl_log (service, app_password, username, real_rip)
            VALUES ("%s", %d, "%s", "%s")]], con:escape(req.service), row.id, con:escape(req.user), con:escape(req.real_rip)))
          cur:close()
          con:close()
          return dovecot.auth.PASSDB_RESULT_OK, ""
        end
      end
      row = cur:fetch (row, "a")
    end
  end

  cur:close()
  con:close()

  return dovecot.auth.PASSDB_RESULT_PASSWORD_MISMATCH, "Failed to authenticate"

  -- PoC
  -- local reqbody = string.format([[{
  --   "success":0,
  --   "service":"%s",
  --   "app_password":false,
  --   "username":"%s",
  --   "real_rip":"%s"
  -- }]], con:escape(req.service), con:escape(req.user), con:escape(req.real_rip))
  -- http.request {
  --   method = "POST",
  --   url = "http://nginx:8081/sasl_log.php",
  --   source = ltn12.source.string(reqbody),
  --   headers = {
  --     ["content-type"] = "application/json",
  --     ["content-length"] = tostring(#reqbody)
  --   },
  --   sink = ltn12.sink.table(respbody)
  -- }

end

function auth_passdb_lookup(req)
  return dovecot.auth.PASSDB_RESULT_USER_UNKNOWN, ""
end

function script_init()
  mysql = require "luasql.mysql"
  http = require "socket.http"
  http.TIMEOUT = 5
  ltn12 = require "ltn12"
  env = mysql.mysql()
  con = env:connect("__DBNAME__","__DBUSER__","__DBPASS__","localhost")
  return 0
end

function script_deinit()
  con:close()
  env:close()
end
EOF

# Replace patterns in app-passdb.lua
sed -i "s/__DBUSER__/${DBUSER}/g" /etc/dovecot/lua/passwd-verify.lua
sed -i "s/__DBPASS__/${DBPASS}/g" /etc/dovecot/lua/passwd-verify.lua
sed -i "s/__DBNAME__/${DBNAME}/g" /etc/dovecot/lua/passwd-verify.lua
sed -i "s/__IPV4_SOGO__/${IPV4_NETWORK}.248/g" /etc/dovecot/lua/passwd-verify.lua


# Migrate old sieve_after file
[[ -f /etc/dovecot/sieve_after ]] && mv /etc/dovecot/sieve_after /etc/dovecot/global_sieve_after
# Create global sieve scripts
cat /etc/dovecot/global_sieve_after > /var/vmail/sieve/global_sieve_after.sieve
cat /etc/dovecot/global_sieve_before > /var/vmail/sieve/global_sieve_before.sieve

# Check permissions of vmail/index/garbage directories.
# Do not do this every start-up, it may take a very long time. So we use a stat check here.
if [[ $(stat -c %U /var/vmail/) != "vmail" ]] ; then chown -R vmail:vmail /var/vmail ; fi
if [[ $(stat -c %U /var/vmail/_garbage) != "vmail" ]] ; then chown -R vmail:vmail /var/vmail/_garbage ; fi
if [[ $(stat -c %U /var/vmail_index) != "vmail" ]] ; then chown -R vmail:vmail /var/vmail_index ; fi

# Cleanup random user maildirs
rm -rf /var/vmail/mailcow.local/*
# Cleanup PIDs
[[ -f /tmp/quarantine_notify.pid ]] && rm /tmp/quarantine_notify.pid

# create sni configuration
echo "" > /etc/dovecot/sni.conf
for cert_dir in /etc/ssl/mail/*/ ; do
  if [[ ! -f ${cert_dir}domains ]] || [[ ! -f ${cert_dir}cert.pem ]] || [[ ! -f ${cert_dir}key.pem ]]; then
    continue
  fi
  domains=($(cat ${cert_dir}domains))
  for domain in ${domains[@]}; do
    echo 'local_name '${domain}' {' >> /etc/dovecot/sni.conf;
    echo ' ssl_cert = <'${cert_dir}'cert.pem' >> /etc/dovecot/sni.conf;
    echo ' ssl_key = <'${cert_dir}'key.pem' >> /etc/dovecot/sni.conf;
    echo '}' >> /etc/dovecot/sni.conf;
  done
done

# Create random master for SOGo sieve features
RAND_USER=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 16 | head -n 1)
RAND_PASS=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 24 | head -n 1)

if [[ ! -z ${DOVECOT_MASTER_USER} ]] && [[ ! -z ${DOVECOT_MASTER_PASS} ]]; then
  RAND_USER=${DOVECOT_MASTER_USER}
  RAND_PASS=${DOVECOT_MASTER_PASS}
fi
echo ${RAND_USER}@mailcow.local:{SHA1}$(echo -n ${RAND_PASS} | sha1sum | awk '{print $1}'):::::: > /etc/dovecot/dovecot-master.passwd
echo ${RAND_USER}@mailcow.local::5000:5000:::: > /etc/dovecot/dovecot-master.userdb
echo ${RAND_USER}@mailcow.local:${RAND_PASS} > /etc/sogo/sieve.creds

if [[ -z ${MAILDIR_SUB} ]]; then
  MAILDIR_SUB_SHARED=
else
  MAILDIR_SUB_SHARED=/${MAILDIR_SUB}
fi
cat <<EOF > /etc/dovecot/shared_namespace.conf
# Autogenerated by mailcow
namespace {
  type = shared
  separator = /
  prefix = Shared/%%u/
  location = maildir:%%h${MAILDIR_SUB_SHARED}:INDEX=~${MAILDIR_SUB_SHARED}/Shared/%%u
  subscriptions = no
  list = children
}
EOF


cat <<EOF > /etc/dovecot/sogo_trusted_ip.conf
# Autogenerated by mailcow
remote ${IPV4_NETWORK}.248 {
  disable_plaintext_auth = no
}
EOF

# Create random master Password for SOGo SSO
RAND_PASS=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 32 | head -n 1)
echo -n ${RAND_PASS} > /etc/phpfpm/sogo-sso.pass
cat <<EOF > /etc/dovecot/sogo-sso.conf
# Autogenerated by mailcow
passdb {
  driver = static
  args = allow_real_nets=${IPV4_NETWORK}.248/32 password={plain}${RAND_PASS}
}
EOF

if [[ "${MASTER}" =~ ^([nN][oO]|[nN])+$ ]]; then
  # Toggling MASTER will result in a rebuild of containers, so the quota script will be recreated
  cat <<'EOF' > /usr/local/bin/quota_notify.py
#!/usr/bin/python3
import sys
sys.exit()
EOF
fi

# 401 is user dovecot
if [[ ! -s /mail_crypt/ecprivkey.pem || ! -s /mail_crypt/ecpubkey.pem ]]; then
  openssl ecparam -name prime256v1 -genkey | openssl pkey -out /mail_crypt/ecprivkey.pem
  openssl pkey -in /mail_crypt/ecprivkey.pem -pubout -out /mail_crypt/ecpubkey.pem
  chown 401 /mail_crypt/ecprivkey.pem /mail_crypt/ecpubkey.pem
else
  chown 401 /mail_crypt/ecprivkey.pem /mail_crypt/ecpubkey.pem
fi

# Compile sieve scripts
sievec /var/vmail/sieve/global_sieve_before.sieve
sievec /var/vmail/sieve/global_sieve_after.sieve
sievec /usr/lib/dovecot/sieve/report-spam.sieve
sievec /usr/lib/dovecot/sieve/report-ham.sieve

for file in /var/vmail/*/*/sieve/*.sieve ; do
  if [[ "$file" == "/var/vmail/*/*/sieve/*.sieve" ]]; then
    continue
  fi
  sievec "$file" "$(dirname "$file")/../.dovecot.svbin"
  chown vmail:vmail "$(dirname "$file")/../.dovecot.svbin"
done

# Fix permissions
chown root:root /etc/dovecot/sql/*.conf
chown root:dovecot /etc/dovecot/sql/dovecot-dict-sql-sieve* /etc/dovecot/sql/dovecot-dict-sql-quota* /etc/dovecot/lua/passwd-verify.lua
chmod 640 /etc/dovecot/sql/*.conf /etc/dovecot/lua/passwd-verify.lua
chown -R vmail:vmail /var/vmail/sieve
chown -R vmail:vmail /var/volatile
chown -R vmail:vmail /var/vmail_index
adduser vmail tty
chmod g+rw /dev/console
chown root:tty /dev/console
chmod +x /usr/lib/dovecot/sieve/rspamd-pipe-ham \
  /usr/lib/dovecot/sieve/rspamd-pipe-spam \
  /usr/local/bin/imapsync_runner.pl \
  /usr/local/bin/imapsync \
  /usr/local/bin/trim_logs.sh \
  /usr/local/bin/sa-rules.sh \
  /usr/local/bin/clean_q_aged.sh \
  /usr/local/bin/maildir_gc.sh \
  /usr/local/sbin/stop-supervisor.sh \
  /usr/local/bin/quota_notify.py \
  /usr/local/bin/repl_health.sh

# Prepare environment file for cronjobs
printenv | sed 's/^\(.*\)$/export \1/g' > /source_env.sh

# Clean old PID if any
[[ -f /var/run/dovecot/master.pid ]] && rm /var/run/dovecot/master.pid

# Clean stopped imapsync jobs
rm -f /tmp/imapsync_busy.lock
IMAPSYNC_TABLE=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SHOW TABLES LIKE 'imapsync'" -Bs)
[[ ! -z ${IMAPSYNC_TABLE} ]] && mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "UPDATE imapsync SET is_running='0'"

# Envsubst maildir_gc
echo "$(envsubst < /usr/local/bin/maildir_gc.sh)" > /usr/local/bin/maildir_gc.sh

# GUID generation
while [[ ${VERSIONS_OK} != 'OK' ]]; do
  if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = \"${DBNAME}\" AND TABLE_NAME = 'versions'") ]]; then
    VERSIONS_OK=OK
  else
    echo "Waiting for versions table to be created..."
    sleep 3
  fi
done
PUBKEY_MCRYPT=$(doveconf -P 2> /dev/null | grep -i mail_crypt_global_public_key | cut -d '<' -f2)
if [ -f ${PUBKEY_MCRYPT} ]; then
  GUID=$(cat <(echo ${MAILCOW_HOSTNAME}) /mail_crypt/ecpubkey.pem | sha256sum | cut -d ' ' -f1 | tr -cd "[a-fA-F0-9.:/] ")
  if [ ${#GUID} -eq 64 ]; then
    mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
REPLACE INTO versions (application, version) VALUES ("GUID", "${GUID}");
EOF
  else
    mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
REPLACE INTO versions (application, version) VALUES ("GUID", "INVALID");
EOF
  fi
fi

# Collect SA rules once now
/usr/local/bin/sa-rules.sh

# Run hooks
for file in /hooks/*; do
@@ -428,12 +8,24 @@ for file in /hooks/*; do
  fi
done

# For some strange, unknown and stupid reason, Dovecot may run into a race condition, when this file is not touched before it is read by dovecot/auth
# May be related to something inside Docker, I seriously don't know
touch /etc/dovecot/lua/passwd-verify.lua
python3 -u /bootstrap/main.py
BOOTSTRAP_EXIT_CODE=$?

if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
# Fix OpenSSL 3.X TLS1.0, 1.1 support (https://community.mailcow.email/d/4062-hi-all/20)
if grep -qE 'ssl_min_protocol\s*=\s*(TLSv1|TLSv1\.1)\s*$' /etc/dovecot/dovecot.conf /etc/dovecot/extra.conf; then
  sed -i '/\[openssl_init\]/a ssl_conf = ssl_configuration' /etc/ssl/openssl.cnf

  echo "[ssl_configuration]" >> /etc/ssl/openssl.cnf
  echo "system_default = tls_system_default" >> /etc/ssl/openssl.cnf
  echo "[tls_system_default]" >> /etc/ssl/openssl.cnf
  echo "MinProtocol = TLSv1" >> /etc/ssl/openssl.cnf
  echo "CipherString = DEFAULT@SECLEVEL=0" >> /etc/ssl/openssl.cnf
fi

exec "$@"
if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
  echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting Dovecot."
  exit $BOOTSTRAP_EXIT_CODE
fi

echo "Bootstrap succeeded. Starting Dovecot..."
/usr/sbin/dovecot -F

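Much of the removed shell above is mechanical config generation now delegated to the bootstrap. As one example, the SNI section walks /etc/ssl/mail/*/ and emits a local_name block per domain; a compact Python sketch of that same loop (a sketch of the removed logic, not code from this PR):

    from pathlib import Path

    def build_sni_conf(cert_root="/etc/ssl/mail", out="/etc/dovecot/sni.conf"):
        blocks = []
        for cert_dir in sorted(p for p in Path(cert_root).iterdir() if p.is_dir()):
            domains_file = cert_dir / "domains"
            cert, key = cert_dir / "cert.pem", cert_dir / "key.pem"
            # Skip incomplete cert directories, exactly like the shell version
            if not (domains_file.is_file() and cert.is_file() and key.is_file()):
                continue
            for domain in domains_file.read_text().split():
                blocks.append(f"local_name {domain} {{\n"
                              f"  ssl_cert = <{cert}\n"
                              f"  ssl_key = <{key}\n"
                              f"}}\n")
        Path(out).write_text("".join(blocks))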
@@ -1,2 +0,0 @@
#!/bin/bash
[ -d /var/vmail/_garbage/ ] && /usr/bin/find /var/vmail/_garbage/ -mindepth 1 -maxdepth 1 -type d -cmin +${MAILDIR_GC_TIME} -exec rm -r {} \;
7
data/Dockerfiles/dovecot/optimize-fts.sh
Normal file
@@ -0,0 +1,7 @@
#!/bin/bash

if [[ "${SKIP_FTS}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  exit 0
else
  doveadm fts optimize -A
fi
@@ -31,7 +31,7 @@ try:

while True:
    try:
        r = redis.StrictRedis(host='redis', decode_responses=True, port=6379, db=0)
        r = redis.StrictRedis(host='redis', decode_responses=True, port=6379, db=0, password=os.environ['REDISPASS'])
        r.ping()
    except Exception as ex:
        print('%s - trying again...' % (ex))

@@ -14,6 +14,11 @@ import sys
import html2text
from subprocess import Popen, PIPE, STDOUT


# Don't run if role is not master
if os.getenv("MASTER").lower() in ["n", "no"]:
    sys.exit()

if len(sys.argv) > 2:
    percent = int(sys.argv[1])
    username = str(sys.argv[2])
@@ -23,7 +28,7 @@ else:

while True:
    try:
        r = redis.StrictRedis(host='redis', decode_responses=True, port=6379, db=0)
        r = redis.StrictRedis(host='redis', decode_responses=True, port=6379, db=0, username='quota_notify', password='')
        r.ping()
    except Exception as ex:
        print('%s - trying again...' % (ex))
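Both notify scripts repeat the same connect-and-ping retry loop. Folded into one helper it looks like the sketch below (the sleep interval is an assumption; the hunks above do not show an explicit delay):

    import os
    import time
    import redis

    def wait_for_redis(host="redis", port=6379):
        # Retry until the authenticated PING succeeds, then hand back the client
        while True:
            try:
                r = redis.StrictRedis(host=host, port=port, db=0,
                                      decode_responses=True,
                                      password=os.environ["REDISPASS"])
                r.ping()
                return r
            except Exception as ex:
                print(f"{ex} - trying again...")
                time.sleep(3)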
@@ -4,14 +4,14 @@ source /source_env.sh

# Do not attempt to write to slave
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning"
else
  REDIS_CMDLINE="redis-cli -h redis -p 6379"
  REDIS_CMDLINE="redis-cli -h redis -p 6379 -a ${REDISPASS} --no-auth-warning"
fi

# Is replication active?
# grep on file is less expensive than doveconf
if ! grep -qi mail_replica /etc/dovecot/dovecot.conf; then
  if [ -n ${MAILCOW_REPLICA_IP} ]; then
    ${REDIS_CMDLINE} SET DOVECOT_REPL_HEALTH 1 > /dev/null
    exit
  fi
@@ -3,8 +3,8 @@ FILE=/tmp/mail$$
cat > $FILE
trap "/bin/rm -f $FILE" 0 1 2 3 13 15

cat ${FILE} | /usr/bin/curl -H "Flag: 11" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzydel
cat ${FILE} | /usr/bin/curl -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/learnham
cat ${FILE} | /usr/bin/curl -H "Flag: 13" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzyadd
cat ${FILE} | /usr/bin/curl -H "Flag: 11" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/fuzzydel
cat ${FILE} | /usr/bin/curl -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/learnham
cat ${FILE} | /usr/bin/curl -H "Flag: 13" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/fuzzyadd

exit 0

@@ -3,8 +3,8 @@ FILE=/tmp/mail$$
cat > $FILE
trap "/bin/rm -f $FILE" 0 1 2 3 13 15

cat ${FILE} | /usr/bin/curl -H "Flag: 13" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzydel
cat ${FILE} | /usr/bin/curl -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/learnspam
cat ${FILE} | /usr/bin/curl -H "Flag: 11" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/fuzzyadd
cat ${FILE} | /usr/bin/curl -H "Flag: 13" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/fuzzydel
cat ${FILE} | /usr/bin/curl -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/learnspam
cat ${FILE} | /usr/bin/curl -H "Flag: 11" -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/fuzzyadd

exit 0

@@ -11,21 +11,25 @@ else
fi

# Deploy
curl --connect-timeout 15 --retry 10 --max-time 30 http://www.spamassassin.heinlein-support.de/$(dig txt 1.4.3.spamassassin.heinlein-support.de +short | tr -d '"' | tr -dc '0-9').tar.gz --output /tmp/sa-rules-heinlein.tar.gz
if gzip -t /tmp/sa-rules-heinlein.tar.gz; then
  tar xfvz /tmp/sa-rules-heinlein.tar.gz -C /tmp/sa-rules-heinlein
  cat /tmp/sa-rules-heinlein/*cf > /etc/rspamd/custom/sa-rules
if curl --connect-timeout 15 --retry 5 --max-time 30 https://www.spamassassin.heinlein-support.de/$(dig txt 1.4.3.spamassassin.heinlein-support.de +short | tr -d '"' | tr -dc '0-9').tar.gz --output /tmp/sa-rules-heinlein.tar.gz; then
  if gzip -t /tmp/sa-rules-heinlein.tar.gz; then
    tar xfvz /tmp/sa-rules-heinlein.tar.gz -C /tmp/sa-rules-heinlein
    cat /tmp/sa-rules-heinlein/*cf > /etc/rspamd/custom/sa-rules
  fi
else
  echo "Failed to download SA rules. Exiting."
  exit 0 # Must be 0 otherwise dovecot would not start at all
fi

sed -i -e 's/\([^\\]\)\$\([^\/]\)/\1\\$\2/g' /etc/rspamd/custom/sa-rules

if [[ "$(cat /etc/rspamd/custom/sa-rules | md5sum | cut -d' ' -f1)" != "${HASH_SA_RULES}" ]]; then
  CONTAINER_NAME=rspamd-mailcow
  CONTAINER_ID=$(curl --silent --insecure https://dockerapi/containers/json | \
  CONTAINER_ID=$(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | \
    jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | \
    jq -rc "select( .name | tostring | contains(\"${CONTAINER_NAME}\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id")
  if [[ ! -z ${CONTAINER_ID} ]]; then
    curl --silent --insecure -XPOST --connect-timeout 15 --max-time 120 https://dockerapi/containers/${CONTAINER_ID}/restart
    curl --silent --insecure -XPOST --connect-timeout 15 --max-time 120 https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/restart
  fi
fi

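The hash check above restarts rspamd through dockerapi whenever the compiled rule set changes. The same flow in Python, assuming the JSON shape the jq pipeline consumes and the dockerapi endpoints used above (a sketch; verify=False mirrors curl's --insecure against the self-signed dockerapi certificate):

    import hashlib
    import os
    import requests

    def restart_rspamd_if_changed(old_hash: str):
        project = os.environ["COMPOSE_PROJECT_NAME"]
        base = f"https://dockerapi.{project}_mailcow-network"
        with open("/etc/rspamd/custom/sa-rules", "rb") as f:
            if hashlib.md5(f.read()).hexdigest() == old_hash:
                return  # rules unchanged, nothing to restart
        for c in requests.get(f"{base}/containers/json", verify=False, timeout=30).json():
            labels = c["Config"]["Labels"]
            if "rspamd-mailcow" in labels.get("com.docker.compose.service", "") \
               and project.lower() in labels.get("com.docker.compose.project", ""):
                requests.post(f"{base}/containers/{c['Id']}/restart", verify=False, timeout=120)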
@@ -11,8 +11,8 @@ stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autostart=true

[program:dovecot]
command=/usr/sbin/dovecot -F
[program:bootstrap]
command=/docker-entrypoint.sh
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr

@@ -7,6 +7,7 @@ options {
  use_fqdn(no);
  owner("root"); group("adm"); perm(0640);
  stats(freq(0));
  keep_timestamp(no);
  bad_hostname("^gconfd$");
};
source s_dgram {
@@ -19,6 +20,7 @@ destination d_redis_ui_log {
    host("`REDIS_SLAVEOF_IP`")
    persist-name("redis1")
    port(`REDIS_SLAVEOF_PORT`)
    auth("`REDISPASS`")
    command("LPUSH" "DOVECOT_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
  );
};
@@ -27,6 +29,7 @@ destination d_redis_f2b_channel {
    host("`REDIS_SLAVEOF_IP`")
    persist-name("redis2")
    port(`REDIS_SLAVEOF_PORT`)
    auth("`REDISPASS`")
    command("PUBLISH" "F2B_CHANNEL" "$(sanitize $MESSAGE)")
  );
};
@@ -35,8 +38,13 @@ filter f_replica {
  not match("User has no mail_replica in userdb" value("MESSAGE"));
  not match("Error: sync: Unknown user in remote" value("MESSAGE"));
};
filter f_dovecot_auth_try {
  not match("- trying the next passdb" value("MESSAGE")) and
  not match("- trying the next userdb" value("MESSAGE"));
};
log {
  source(s_dgram);
  filter(f_dovecot_auth_try);
  filter(f_replica);
  destination(d_stdout);
  filter(f_mail);

@@ -7,6 +7,7 @@ options {
  use_fqdn(no);
  owner("root"); group("adm"); perm(0640);
  stats(freq(0));
  keep_timestamp(no);
  bad_hostname("^gconfd$");
};
source s_dgram {
@@ -19,6 +20,7 @@ destination d_redis_ui_log {
    host("redis-mailcow")
    persist-name("redis1")
    port(6379)
    auth("`REDISPASS`")
    command("LPUSH" "DOVECOT_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
  );
};
@@ -27,6 +29,7 @@ destination d_redis_f2b_channel {
    host("redis-mailcow")
    persist-name("redis2")
    port(6379)
    auth("`REDISPASS`")
    command("PUBLISH" "F2B_CHANNEL" "$(sanitize $MESSAGE)")
  );
};
@@ -35,8 +38,13 @@ filter f_replica {
  not match("User has no mail_replica in userdb" value("MESSAGE"));
  not match("Error: sync: Unknown user in remote" value("MESSAGE"));
};
filter f_dovecot_auth_try {
  not match("- trying the next passdb" value("MESSAGE")) and
  not match("- trying the next userdb" value("MESSAGE"));
};
log {
  source(s_dgram);
  filter(f_dovecot_auth_try);
  filter(f_replica);
  destination(d_stdout);
  filter(f_mail);
@@ -10,9 +10,9 @@ catch_non_zero() {
source /source_env.sh
# Do not attempt to write to slave
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning"
else
  REDIS_CMDLINE="redis-cli -h redis -p 6379"
  REDIS_CMDLINE="redis-cli -h redis -p 6379 -a ${REDISPASS} --no-auth-warning"
fi
catch_non_zero "${REDIS_CMDLINE} LTRIM ACME_LOG 0 ${LOG_LINES}"
catch_non_zero "${REDIS_CMDLINE} LTRIM POSTFIX_MAILLOG 0 ${LOG_LINES}"
@@ -23,3 +23,4 @@ catch_non_zero "${REDIS_CMDLINE} LTRIM AUTODISCOVER_LOG 0 ${LOG_LINES}"
catch_non_zero "${REDIS_CMDLINE} LTRIM API_LOG 0 ${LOG_LINES}"
catch_non_zero "${REDIS_CMDLINE} LTRIM RL_LOG 0 ${LOG_LINES}"
catch_non_zero "${REDIS_CMDLINE} LTRIM WATCHDOG_LOG 0 ${LOG_LINES}"
catch_non_zero "${REDIS_CMDLINE} LTRIM CRON_LOG 0 ${LOG_LINES}"
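For reference, the log capping this hunk now authenticates can also be reproduced from Python with redis-py; this is a minimal sketch, assuming the same mailcow-style defaults the script uses ('redis' as hostname, port 6379) and with the password and line count as placeholders, not values taken from this diff.

import redis

# Stand-in for one catch_non_zero line above, e.g.
# catch_non_zero "${REDIS_CMDLINE} LTRIM ACME_LOG 0 ${LOG_LINES}"
r = redis.Redis(host='redis', port=6379, password='<REDISPASS>')  # placeholder secret
r.ltrim('ACME_LOG', 0, 9999)  # 9999 stands in for ${LOG_LINES}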
28 data/Dockerfiles/mariadb/Dockerfile Normal file
@@ -0,0 +1,28 @@
FROM mariadb:10.11

LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    python3 \
    python3-pip \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN pip install \
    mysql-connector-python \
    jinja2 \
    redis \
    dnspython \
    psutil

COPY data/Dockerfiles/bootstrap /bootstrap
COPY data/Dockerfiles/mariadb/docker-entrypoint.sh /docker-entrypoint.sh

RUN chmod +x /docker-entrypoint.sh

ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["mysqld"]
20 data/Dockerfiles/mariadb/docker-entrypoint.sh Normal file
@@ -0,0 +1,20 @@
#!/bin/bash

# Run hooks
for file in /hooks/*; do
  if [ -x "${file}" ]; then
    echo "Running hook ${file}"
    "${file}"
  fi
done

python3 -u /bootstrap/main.py
BOOTSTRAP_EXIT_CODE=$?

if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
  echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting MariaDB."
  exit $BOOTSTRAP_EXIT_CODE
fi

echo "Bootstrap succeeded. Starting MariaDB..."
exec /usr/local/bin/docker-entrypoint.sh "$@"
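The entrypoint above only inspects the exit code of /bootstrap/main.py, so the bootstrap's whole contract is "exit 0 to let the service start". A hypothetical minimal main.py honoring that contract could look like this; the bootstrap() body is an invented placeholder, not part of this diff.

#!/usr/bin/env python3
import sys

def bootstrap() -> bool:
  # Hypothetical placeholder: render configs, wait for dependencies, etc.
  return True

if __name__ == '__main__':
  # Exit 0 lets the entrypoint exec the real service; any other code aborts it.
  sys.exit(0 if bootstrap() else 1)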
@@ -1,5 +1,6 @@
FROM alpine:3.19
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
FROM alpine:3.21

LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"

WORKDIR /app
@@ -21,28 +21,6 @@ from modules.IPTables import IPTables
from modules.NFTables import NFTables

# connect to redis
while True:
  try:
    redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '')
    redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '')
    if "".__eq__(redis_slaveof_ip):
      r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0)
    else:
      r = redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0)
    r.ping()
  except Exception as ex:
    print('%s - trying again in 3 seconds' % (ex))
    time.sleep(3)
  else:
    break
pubsub = r.pubsub()

# rename fail2ban to netfilter
if r.exists('F2B_LOG'):
  r.rename('F2B_LOG', 'NETFILTER_LOG')

# globals
WHITELIST = []
BLACKLIST= []
@@ -50,18 +28,10 @@ bans = {}
quit_now = False
exit_code = 0
lock = Lock()

# init Logger
logger = Logger(r)
# init backend
backend = sys.argv[1]
if backend == "nftables":
  logger.logInfo('Using NFTables backend')
  tables = NFTables("MAILCOW", logger)
else:
  logger.logInfo('Using IPTables backend')
  tables = IPTables("MAILCOW", logger)
chain_name = "MAILCOW"
r = None
pubsub = None
clear_before_quit = False

def refreshF2boptions():
@@ -110,16 +80,15 @@ def refreshF2bregex():
  global exit_code
  if not r.get('F2B_REGEX'):
    f2bregex = {}
    f2bregex[1] = 'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)'
    f2bregex[2] = 'Rspamd UI: Invalid password by ([0-9a-f\.:]+)'
    f2bregex[3] = 'warning: .*\[([0-9a-f\.:]+)\]: SASL .+ authentication failed: (?!.*Connection lost to authentication server).+'
    f2bregex[4] = 'warning: non-SMTP command from .*\[([0-9a-f\.:]+)]:.+'
    f2bregex[5] = 'NOQUEUE: reject: RCPT from \[([0-9a-f\.:]+)].+Protocol error.+'
    f2bregex[6] = '-login: Disconnected.+ \(auth failed, .+\): user=.*, method=.+, rip=([0-9a-f\.:]+),'
    f2bregex[7] = '-login: Aborted login.+ \(auth failed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
    f2bregex[8] = '-login: Aborted login.+ \(tried to use disallowed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+'
    f2bregex[9] = 'SOGo.+ Login from \'([0-9a-f\.:]+)\' for user .+ might not have worked'
    f2bregex[10] = '([0-9a-f\.:]+) \"GET \/SOGo\/.* HTTP.+\" 403 .+'
    f2bregex[1] = r'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)'
    f2bregex[2] = r'Rspamd UI: Invalid password by ([0-9a-f\.:]+)'
    f2bregex[3] = r'warning: .*\[([0-9a-f\.:]+)\]: SASL .+ authentication failed: (?!.*Connection lost to authentication server).+'
    f2bregex[4] = r'warning: non-SMTP command from .*\[([0-9a-f\.:]+)]:.+'
    f2bregex[5] = r'NOQUEUE: reject: RCPT from \[([0-9a-f\.:]+)].+Protocol error.+'
    f2bregex[6] = r'\w+\([^,]+,([0-9a-f\.:]+),<[^>]+>\): Password mismatch \(SHA1 of given password: [a-f0-9]+\)'
    f2bregex[7] = r'\w+\([^,]+,([0-9a-f\.:]+),<[^>]+>\): unknown user \(SHA1 of given password: [a-f0-9]+\)'
    f2bregex[8] = r'SOGo.+ Login from \'([0-9a-f\.:]+)\' for user .+ might not have worked'
    f2bregex[9] = r'([0-9a-f\.:]+) \"GET \/SOGo\/.* HTTP.+\" 403 .+'
    r.set('F2B_REGEX', json.dumps(f2bregex, ensure_ascii=False))
  else:
    try:
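The switch to r'' raw strings in this hunk keeps Python from consuming backslashes before re ever sees the patterns. A quick sanity check of the first pattern against a synthetic log line (the sample line is invented, the pattern is taken from the diff):

import re

pattern = re.compile(r'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)')
sample = 'mailcow UI: Invalid password for admin by 198.51.100.23'
m = pattern.search(sample)
print(m.group(1) if m else 'no match')  # -> 198.51.100.23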
@@ -136,7 +105,7 @@ def get_ip(address):
    ip = ip.ipv4_mapped
  if ip.is_private or ip.is_loopback:
    return False

  return ip

def ban(address):
@@ -144,8 +113,6 @@ def ban(address):
  global lock

  refreshF2boptions()
  BAN_TIME = int(f2boptions['ban_time'])
  BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment'])
  MAX_ATTEMPTS = int(f2boptions['max_attempts'])
  RETRY_WINDOW = int(f2boptions['retry_window'])
  NETBAN_IPV4 = '/' + str(f2boptions['netban_ipv4'])
@@ -180,7 +147,7 @@ def ban(address):

  if bans[net]['attempts'] >= MAX_ATTEMPTS:
    cur_time = int(round(time.time()))
    NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** bans[net]['ban_counter']
    NET_BAN_TIME = calcNetBanTime(bans[net]['ban_counter'])
    logger.logCrit('Banning %s for %d minutes' % (net, NET_BAN_TIME / 60 ))
    if type(ip) is ipaddress.IPv4Address and int(f2boptions['manage_external']) != 1:
      with lock:
@@ -250,17 +217,21 @@ def clear():
  with lock:
    tables.clearIPv4Table()
    tables.clearIPv6Table()
    r.delete('F2B_ACTIVE_BANS')
    r.delete('F2B_PERM_BANS')
    pubsub.unsubscribe()
    try:
      if r is not None:
        r.delete('F2B_ACTIVE_BANS')
        r.delete('F2B_PERM_BANS')
    except Exception as ex:
      logger.logWarn('Error clearing redis keys F2B_ACTIVE_BANS and F2B_PERM_BANS: %s' % ex)

def watch():
  logger.logInfo('Watching Redis channel F2B_CHANNEL')
  pubsub.subscribe('F2B_CHANNEL')

  global pubsub
  global quit_now
  global exit_code

  logger.logInfo('Watching Redis channel F2B_CHANNEL')
  pubsub.subscribe('F2B_CHANNEL')

  while not quit_now:
    try:
      for item in pubsub.listen():
@@ -280,6 +251,7 @@ def watch():
          ban(addr)
    except Exception as ex:
      logger.logWarn('Error reading log line from pubsub: %s' % ex)
      pubsub = None
      quit_now = True
      exit_code = 2

@@ -302,12 +274,11 @@ def snat6(snat_target):
    tables.snat6(snat_target, os.getenv('IPV6_NETWORK', 'fd4d:6169:6c63:6f77::/64'))

def autopurge():
  global f2boptions

  while not quit_now:
    time.sleep(10)
    refreshF2boptions()
    BAN_TIME = int(f2boptions['ban_time'])
    MAX_BAN_TIME = int(f2boptions['max_ban_time'])
    BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment'])
    MAX_ATTEMPTS = int(f2boptions['max_attempts'])
    QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN')
    if QUEUE_UNBAN:
@@ -315,9 +286,9 @@ def autopurge():
        unban(str(net))
    for net in bans.copy():
      if bans[net]['attempts'] >= MAX_ATTEMPTS:
        NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** bans[net]['ban_counter']
        NET_BAN_TIME = calcNetBanTime(bans[net]['ban_counter'])
        TIME_SINCE_LAST_ATTEMPT = time.time() - bans[net]['last_attempt']
        if TIME_SINCE_LAST_ATTEMPT > NET_BAN_TIME or TIME_SINCE_LAST_ATTEMPT > MAX_BAN_TIME:
        if TIME_SINCE_LAST_ATTEMPT > NET_BAN_TIME:
          unban(net)

def mailcowChainOrder():
@@ -331,6 +302,16 @@ def mailcowChainOrder():
    if quit_now: return
    quit_now, exit_code = tables.checkIPv6ChainOrder()

def calcNetBanTime(ban_counter):
  global f2boptions

  BAN_TIME = int(f2boptions['ban_time'])
  MAX_BAN_TIME = int(f2boptions['max_ban_time'])
  BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment'])
  NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** ban_counter
  NET_BAN_TIME = max([BAN_TIME, min([NET_BAN_TIME, MAX_BAN_TIME])])
  return NET_BAN_TIME
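calcNetBanTime() centralizes the ban-time formula that previously appeared twice: exponential growth per prior ban of the same net, clamped between BAN_TIME and MAX_BAN_TIME. A standalone sketch of the same arithmetic with assumed option values (1800 and 10000 are illustrative defaults, not values from this diff):

def calc_net_ban_time(ban_counter, ban_time=1800, max_ban_time=10000, increment=True):
  # Doubles the base ban time for every previous ban of this net,
  # then clamps the result into [ban_time, max_ban_time].
  net_ban_time = ban_time if not increment else ban_time * 2 ** ban_counter
  return max(ban_time, min(net_ban_time, max_ban_time))

print(calc_net_ban_time(0))  # 1800
print(calc_net_ban_time(3))  # 10000 (1800 * 2**3 = 14400, clamped)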
def isIpNetwork(address):
  try:
    ipaddress.ip_network(address, False)
@@ -403,21 +384,76 @@ def blacklistUpdate():
        permBan(net=net, unban=True)
    time.sleep(60.0 - ((time.time() - start_time) % 60.0))

def quit(signum, frame):
  global quit_now
  quit_now = True
def sigterm_quit(signum, frame):
  global clear_before_quit
  clear_before_quit = True
  sys.exit(exit_code)

def berfore_quit():
  if clear_before_quit:
    clear()
  if pubsub is not None:
    pubsub.unsubscribe()

if __name__ == '__main__':
  refreshF2boptions()
  atexit.register(berfore_quit)
  signal.signal(signal.SIGTERM, sigterm_quit)

  # init Logger
  logger = Logger()

  # init backend
  backend = sys.argv[1]
  if backend == "nftables":
    logger.logInfo('Using NFTables backend')
    tables = NFTables(chain_name, logger)
  else:
    logger.logInfo('Using IPTables backend')
    tables = IPTables(chain_name, logger)

  # In case a previous session was killed without cleanup
  clear()

  # Reinit MAILCOW chain
  # Is called before threads start, no locking
  logger.logInfo("Initializing mailcow netfilter chain")
  tables.initChainIPv4()
  tables.initChainIPv6()

  if os.getenv("DISABLE_NETFILTER_ISOLATION_RULE").lower() in ("y", "yes"):
    logger.logInfo(f"Skipping {chain_name} isolation")
  else:
    logger.logInfo(f"Setting {chain_name} isolation")
    tables.create_mailcow_isolation_rule("br-mailcow", [3306, 6379, 8983, 12345], os.getenv("MAILCOW_REPLICA_IP"))

  # connect to redis
  while True:
    try:
      redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '')
      redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '')
      if "".__eq__(redis_slaveof_ip):
        r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0, password=os.environ['REDISPASS'])
      else:
        r = redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0, password=os.environ['REDISPASS'])
      r.ping()
      pubsub = r.pubsub()
    except Exception as ex:
      print('%s - trying again in 3 seconds' % (ex))
      time.sleep(3)
    else:
      break
  logger.set_redis(r)

  # rename fail2ban to netfilter
  if r.exists('F2B_LOG'):
    r.rename('F2B_LOG', 'NETFILTER_LOG')
  # clear bans in redis
  r.delete('F2B_ACTIVE_BANS')
  r.delete('F2B_PERM_BANS')

  refreshF2boptions()

  watch_thread = Thread(target=watch)
  watch_thread.daemon = True
  watch_thread.start()
@@ -460,9 +496,6 @@ if __name__ == '__main__':
  whitelistupdate_thread.daemon = True
  whitelistupdate_thread.start()

  signal.signal(signal.SIGTERM, quit)
  atexit.register(clear)

  while not quit_now:
    time.sleep(0.5)
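The __main__ hunk reorders startup so the firewall chains and isolation rules exist before Redis is even reachable, with the Logger upgraded via set_redis() once the connection succeeds. The connect-and-retry portion, extracted as a helper for clarity (same logic as the loop above, just wrapped in a function):

import os
import time
import redis

def connect_redis():
  # Retry every 3 seconds until PING succeeds, as in the loop above.
  while True:
    try:
      if not os.getenv('REDIS_SLAVEOF_IP', ''):
        host, port = os.getenv('IPV4_NETWORK', '172.22.1') + '.249', 6379
      else:
        host = os.getenv('REDIS_SLAVEOF_IP')
        port = int(os.getenv('REDIS_SLAVEOF_PORT'))
      r = redis.StrictRedis(host=host, port=port, db=0, decode_responses=True,
                            password=os.environ['REDISPASS'])
      r.ping()
      return r
    except Exception as ex:
      print('%s - trying again in 3 seconds' % ex)
      time.sleep(3)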
@@ -1,5 +1,6 @@
import iptc
import time
import os

class IPTables:
  def __init__(self, chain_name, logger):
@@ -211,3 +212,41 @@ class IPTables:
    target = rule.create_target("SNAT")
    target.to_source = snat_target
    return rule

  def create_mailcow_isolation_rule(self, _interface:str, _dports:list, _allow:str = ""):
    try:
      chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), self.chain_name)

      # insert mailcow isolation rule
      rule = iptc.Rule()
      rule.in_interface = f'!{_interface}'
      rule.out_interface = _interface
      rule.protocol = 'tcp'
      rule.create_target("DROP")
      match = rule.create_match("multiport")
      match.dports = ','.join(map(str, _dports))

      if rule in chain.rules:
        chain.delete_rule(rule)
      chain.insert_rule(rule, position=0)

      # insert mailcow isolation exception rule
      if _allow != "":
        rule = iptc.Rule()
        rule.src = _allow
        rule.in_interface = f'!{_interface}'
        rule.out_interface = _interface
        rule.protocol = 'tcp'
        rule.create_target("ACCEPT")
        match = rule.create_match("multiport")
        match.dports = ','.join(map(str, _dports))

        if rule in chain.rules:
          chain.delete_rule(rule)
        chain.insert_rule(rule, position=0)

      return True
    except Exception as e:
      self.logger.logCrit(f"Error adding {self.chain_name} isolation: {e}")
      return False
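For context, this is how the __main__ hunk above exercises the new method; the delete-then-insert dance makes the call idempotent across restarts. A call-site sketch: the modules.Logger import path is an assumption based on the other module imports in this diff, and the replica IP is a placeholder.

from modules.IPTables import IPTables  # import as it appears in main.py
from modules.Logger import Logger      # assumed path (see Logger diff below)

logger = Logger()
tables = IPTables("MAILCOW", logger)
# Drop TCP from outside the bridge to mailcow's internal ports, optionally
# allowing one source; interface and ports are the ones used in the diff.
tables.create_mailcow_isolation_rule("br-mailcow",
                                     [3306, 6379, 8983, 12345],
                                     "203.0.113.10")  # hypothetical replica IP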
@@ -2,7 +2,10 @@ import time
import json

class Logger:
  def __init__(self, redis):
  def __init__(self):
    self.r = None

  def set_redis(self, redis):
    self.r = redis

  def log(self, priority, message):
@@ -10,8 +13,12 @@ class Logger:
    tolog['time'] = int(round(time.time()))
    tolog['priority'] = priority
    tolog['message'] = message
    self.r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False))
    print(message)
    if self.r is not None:
      try:
        self.r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False))
      except Exception as ex:
        print('Failed logging to redis: %s' % (ex))

  def logWarn(self, message):
    self.log('warn', message)
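With the redis handle now injected after construction, the Logger degrades gracefully to stdout-only output until set_redis() is called. A usage sketch, assuming the same modules.Logger import path as above:

from modules.Logger import Logger  # assumed path, mirroring modules.IPTables

logger = Logger()              # no redis yet: log() only print()s
logger.logInfo('starting up')  # stdout only, NETFILTER_LOG untouched
# ... connect to redis as in main.py, then:
# logger.set_redis(r)          # from here on entries are also LPUSHed
# logger.logInfo('connected')  # goes to stdout and NETFILTER_LOG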
@@ -1,5 +1,6 @@
import nftables
import ipaddress
import os

class NFTables:
  def __init__(self, chain_name, logger):
@@ -40,6 +41,7 @@ class NFTables:
          exit_code = 2

        if chain_position > 0:
          chain_position += 1
          self.logger.logCrit(f'MAILCOW target is in position {chain_position} in the {filter_table} {chain} table, restarting container to fix it...')
          err = True
          exit_code = 2
@@ -266,6 +268,17 @@ class NFTables:

    return self.nft_exec_dict(delete_command)

  def delete_filter_rule(self, _family:str, _chain: str, _handle:str):
    delete_command = self.get_base_dict()
    _rule_opts = {'family': _family,
                  'table': 'filter',
                  'chain': _chain,
                  'handle': _handle }
    _delete = {'delete': {'rule': _rule_opts} }
    delete_command["nftables"].append(_delete)

    return self.nft_exec_dict(delete_command)

  def snat_rule(self, _family: str, snat_target: str, source_address: str):
    chain_name = self.nft_chain_names[_family]['nat']['postrouting']

@@ -297,8 +310,8 @@ class NFTables:
          rule_handle = rule["handle"]
          break

    dest_net = ipaddress.ip_network(source_address)
    target_net = ipaddress.ip_network(snat_target)
    dest_net = ipaddress.ip_network(source_address, strict=False)
    target_net = ipaddress.ip_network(snat_target, strict=False)

    if rule_found:
      saddr_ip = rule["expr"][0]["match"]["right"]["prefix"]["addr"]
@@ -309,9 +322,9 @@ class NFTables:

      target_ip = rule["expr"][3]["snat"]["addr"]

      saddr_net = ipaddress.ip_network(saddr_ip + '/' + str(saddr_len))
      daddr_net = ipaddress.ip_network(daddr_ip + '/' + str(daddr_len))
      current_target_net = ipaddress.ip_network(target_ip)
      saddr_net = ipaddress.ip_network(saddr_ip + '/' + str(saddr_len), strict=False)
      daddr_net = ipaddress.ip_network(daddr_ip + '/' + str(daddr_len), strict=False)
      current_target_net = ipaddress.ip_network(target_ip, strict=False)

      match = all((
        dest_net == saddr_net,
@@ -381,7 +394,7 @@ class NFTables:
        break
    return chain_handle

  def get_rules_handle(self, _family: str, _table: str, chain_name: str):
  def get_rules_handle(self, _family: str, _table: str, chain_name: str, _comment_filter = "mailcow"):
    rule_handle = []
    # Command: 'nft list chain {family} {table} {chain_name}'
    _chain_opts = {'family': _family, 'table': _table, 'name': chain_name}
@@ -397,7 +410,7 @@ class NFTables:

      rule = _object["rule"]
      if rule["family"] == _family and rule["table"] == _table and rule["chain"] == chain_name:
        if rule.get("comment") and rule["comment"] == "mailcow":
        if rule.get("comment") and rule["comment"] == _comment_filter:
          rule_handle.append(rule["handle"])
    return rule_handle

@@ -405,7 +418,7 @@
    json_command = self.get_base_dict()

    expr_opt = []
    ipaddr_net = ipaddress.ip_network(ipaddr)
    ipaddr_net = ipaddress.ip_network(ipaddr, strict=False)
    right_dict = {'prefix': {'addr': str(ipaddr_net.network_address), 'len': int(ipaddr_net.prefixlen) } }

    left_dict = {'payload': {'protocol': _family, 'field': 'saddr'} }
@@ -439,6 +452,8 @@
        continue

      rule = _object["rule"]["expr"][0]["match"]
      if not "payload" in rule["left"]:
        continue
      left_opt = rule["left"]["payload"]
      if not left_opt["protocol"] == _family:
        continue
@@ -454,7 +469,7 @@
      current_rule_net = ipaddress.ip_network(current_rule_ip)

      # ip to ban
      candidate_net = ipaddress.ip_network(ipaddr)
      candidate_net = ipaddress.ip_network(ipaddr, strict=False)

      if current_rule_net == candidate_net:
        rule_handle = _object["rule"]["handle"]
@@ -493,3 +508,152 @@ class NFTables:
      position+=1

    return position if rule_found else False

  def create_mailcow_isolation_rule(self, _interface:str, _dports:list, _allow:str = ""):
    family = "ip"
    table = "filter"
    comment_filter_drop = "mailcow isolation"
    comment_filter_allow = "mailcow isolation allow"
    json_command = self.get_base_dict()

    # Delete old mailcow isolation rules
    handles = self.get_rules_handle(family, table, self.chain_name, comment_filter_drop)
    for handle in handles:
      self.delete_filter_rule(family, self.chain_name, handle)
    handles = self.get_rules_handle(family, table, self.chain_name, comment_filter_allow)
    for handle in handles:
      self.delete_filter_rule(family, self.chain_name, handle)

    # insert mailcow isolation rule
    _match_dict_drop = [
      {
        "match": {
          "op": "!=",
          "left": {
            "meta": {
              "key": "iifname"
            }
          },
          "right": _interface
        }
      },
      {
        "match": {
          "op": "==",
          "left": {
            "meta": {
              "key": "oifname"
            }
          },
          "right": _interface
        }
      },
      {
        "match": {
          "op": "==",
          "left": {
            "payload": {
              "protocol": "tcp",
              "field": "dport"
            }
          },
          "right": {
            "set": _dports
          }
        }
      },
      {
        "counter": {
          "packets": 0,
          "bytes": 0
        }
      },
      {
        "drop": None
      }
    ]
    rule_drop = { "insert": { "rule": {
      "family": family,
      "table": table,
      "chain": self.chain_name,
      "comment": comment_filter_drop,
      "expr": _match_dict_drop
    }}}
    json_command["nftables"].append(rule_drop)

    # insert mailcow isolation allow rule
    if _allow != "":
      _match_dict_allow = [
        {
          "match": {
            "op": "==",
            "left": {
              "payload": {
                "protocol": "ip",
                "field": "saddr"
              }
            },
            "right": _allow
          }
        },
        {
          "match": {
            "op": "!=",
            "left": {
              "meta": {
                "key": "iifname"
              }
            },
            "right": _interface
          }
        },
        {
          "match": {
            "op": "==",
            "left": {
              "meta": {
                "key": "oifname"
              }
            },
            "right": _interface
          }
        },
        {
          "match": {
            "op": "==",
            "left": {
              "payload": {
                "protocol": "tcp",
                "field": "dport"
              }
            },
            "right": {
              "set": _dports
            }
          }
        },
        {
          "counter": {
            "packets": 0,
            "bytes": 0
          }
        },
        {
          "accept": None
        }
      ]
      rule_allow = { "insert": { "rule": {
        "family": family,
        "table": table,
        "chain": self.chain_name,
        "comment": comment_filter_allow,
        "expr": _match_dict_allow
      }}}
      json_command["nftables"].append(rule_allow)

    success = self.nft_exec_dict(json_command)
    if success == False:
      self.logger.logCrit(f"Error adding {self.chain_name} isolation")
      return False

    return True
36 data/Dockerfiles/nginx/Dockerfile Normal file
@@ -0,0 +1,36 @@
FROM nginx:alpine
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"

ENV PIP_BREAK_SYSTEM_PACKAGES=1

RUN apk add --no-cache nginx \
    python3 py3-pip \
    supervisor

RUN apk add --no-cache --virtual .build-deps \
    gcc \
    musl-dev \
    python3-dev \
    linux-headers \
    && pip install --break-system-packages psutil \
    && apk del .build-deps

RUN pip install --break-system-packages \
    mysql-connector-python \
    jinja2 \
    redis \
    dnspython

RUN mkdir -p /etc/nginx/includes

COPY data/Dockerfiles/bootstrap /bootstrap
COPY data/Dockerfiles/nginx/docker-entrypoint.sh /
COPY data/Dockerfiles/nginx/supervisord.conf /etc/supervisor/supervisord.conf
COPY data/Dockerfiles/nginx/stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh

RUN chmod +x /docker-entrypoint.sh
RUN chmod +x /usr/local/sbin/stop-supervisor.sh

CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
20 data/Dockerfiles/nginx/docker-entrypoint.sh Executable file
@@ -0,0 +1,20 @@
#!/bin/sh

# Run hooks
for file in /hooks/*; do
  if [ -x "${file}" ]; then
    echo "Running hook ${file}"
    "${file}"
  fi
done

python3 -u /bootstrap/main.py
BOOTSTRAP_EXIT_CODE=$?

if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
  echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting Nginx."
  exit $BOOTSTRAP_EXIT_CODE
fi

echo "Bootstrap succeeded. Starting Nginx..."
nginx -g "daemon off;"
8 data/Dockerfiles/nginx/stop-supervisor.sh Executable file
@@ -0,0 +1,8 @@
#!/bin/bash

printf "READY\n";

while read line; do
  echo "Processing Event: $line" >&2;
  kill -3 $(cat "/var/run/supervisord.pid")
done < /dev/stdin
27 data/Dockerfiles/nginx/supervisord.conf Normal file
@@ -0,0 +1,27 @@
[supervisord]
nodaemon=true
user=root

[program:syslog-ng]
command=/usr/sbin/syslog-ng --foreground --no-caps
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autostart=true
priority=1

[program:bootstrap]
command=/docker-entrypoint.sh
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
priority=2
startretries=10
autorestart=true
stopwaitsecs=120

[eventlistener:processes]
command=/usr/local/sbin/stop-supervisor.sh
events=PROCESS_STATE_STOPPED, PROCESS_STATE_EXITED, PROCESS_STATE_FATAL
@@ -1,5 +1,6 @@
FROM alpine:3.19
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
FROM alpine:3.21

LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"

ARG PIP_BREAK_SYSTEM_PACKAGES=1
WORKDIR /app
@@ -32,6 +32,13 @@ import time
import magic
import re

skip_olefy = os.getenv('SKIP_OLEFY', '')

if skip_olefy.lower() in ['yes', 'y']:
  print("SKIP_OLEFY=y, skipping Olefy...")
  time.sleep(365 * 24 * 60 * 60)
  sys.exit(0)

# merge variables from /etc/olefy.conf and the defaults
olefy_listen_addr_string = os.getenv('OLEFY_BINDADDRESS', '127.0.0.1,::1')
olefy_listen_port = int(os.getenv('OLEFY_BINDPORT', '10050'))
@@ -113,7 +120,7 @@ def oletools( stream, tmp_file_name, lid ):
  out = bytes(out.decode('utf-8', 'ignore').replace(' ', ' ').replace('\t', '').replace('\n', '').replace('XLMMacroDeobfuscator: pywin32 is not installed (only is required if you want to use MS Excel)', ''), encoding="utf-8")
  failed = False
  if out.__len__() < 30:
    logger.error('{} olevba returned <30 chars - rc: {!r}, response: {!r}, error: {!r}'.format(lid,cmd_tmp.returncode,
    logger.error('{} olevba returned <30 chars - rc: {!r}, response: {!r}, error: {!r}'.format(lid,cmd_tmp.returncode,
      out.decode('utf-8', 'ignore'), err.decode('utf-8', 'ignore')))
    out = b'[ { "error": "Unhandled error - too short olevba response" } ]'
    failed = True
@@ -1,18 +1,19 @@
FROM php:8.2-fpm-alpine3.18
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
FROM php:8.2-fpm-alpine3.21

LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"

# renovate: datasource=github-tags depName=krakjoe/apcu versioning=semver-coerced extractVersion=^v(?<version>.*)$
ARG APCU_PECL_VERSION=5.1.23
ARG APCU_PECL_VERSION=5.1.24
# renovate: datasource=github-tags depName=Imagick/imagick versioning=semver-coerced extractVersion=(?<version>.*)$
ARG IMAGICK_PECL_VERSION=3.7.0
ARG IMAGICK_PECL_VERSION=3.8.0
# renovate: datasource=github-tags depName=php/pecl-mail-mailparse versioning=semver-coerced extractVersion=^v(?<version>.*)$
ARG MAILPARSE_PECL_VERSION=3.1.6
ARG MAILPARSE_PECL_VERSION=3.1.8
# renovate: datasource=github-tags depName=php-memcached-dev/php-memcached versioning=semver-coerced extractVersion=^v(?<version>.*)$
ARG MEMCACHED_PECL_VERSION=3.2.0
# renovate: datasource=github-tags depName=phpredis/phpredis versioning=semver-coerced extractVersion=(?<version>.*)$
ARG REDIS_PECL_VERSION=6.0.2
ARG REDIS_PECL_VERSION=6.1.0
# renovate: datasource=github-tags depName=composer/composer versioning=semver-coerced extractVersion=(?<version>.*)$
ARG COMPOSER_VERSION=2.6.6
ARG COMPOSER_VERSION=2.8.6

RUN apk add -U --no-cache autoconf \
    aspell-dev \
@@ -62,6 +63,7 @@ RUN apk add -U --no-cache autoconf \
    samba-client \
    zlib-dev \
    tzdata \
    python3 py3-pip \
    && pecl install APCu-${APCU_PECL_VERSION} \
    && pecl install imagick-${IMAGICK_PECL_VERSION} \
    && pecl install mailparse-${MAILPARSE_PECL_VERSION} \
@@ -71,12 +73,12 @@ RUN apk add -U --no-cache autoconf \
    && pecl clear-cache \
    && docker-php-ext-configure intl \
    && docker-php-ext-configure exif \
    && docker-php-ext-configure gd --with-freetype=/usr/include/ \
    && docker-php-ext-configure gd --with-freetype=/usr/include/ \
    --with-jpeg=/usr/include/ \
    --with-webp \
    --with-xpm \
    --with-avif \
    && docker-php-ext-install -j 4 exif gd gettext intl ldap opcache pcntl pdo pdo_mysql pspell soap sockets sysvsem zip bcmath gmp \
    && docker-php-ext-install -j 4 exif gd gettext intl ldap opcache pcntl pdo pdo_mysql pspell soap sockets zip bcmath gmp \
    && docker-php-ext-configure imap --with-imap --with-imap-ssl \
    && docker-php-ext-install -j 4 imap \
    && curl --silent --show-error https://getcomposer.org/installer | php -- --version=${COMPOSER_VERSION} \
@@ -106,8 +108,26 @@ RUN apk add -U --no-cache autoconf \
    pcre-dev \
    zlib-dev

COPY ./docker-entrypoint.sh /
RUN apk add --no-cache --virtual .build-deps \
    gcc \
    musl-dev \
    python3-dev \
    linux-headers \
    && pip install --break-system-packages psutil \
    && apk del .build-deps

RUN pip install --break-system-packages \
    mysql-connector-python \
    jinja2 \
    redis \
    dnspython

COPY data/Dockerfiles/bootstrap /bootstrap
COPY data/Dockerfiles/phpfpm/docker-entrypoint.sh /

RUN chmod +x /docker-entrypoint.sh

ENTRYPOINT ["/docker-entrypoint.sh"]

CMD ["php-fpm"]
@@ -1,209 +1,5 @@
#!/bin/bash

function array_by_comma { local IFS=","; echo "$*"; }

# Wait for containers
while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
  echo "Waiting for SQL..."
  sleep 2
done

# Do not attempt to write to slave
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
else
  REDIS_CMDLINE="redis-cli -h redis -p 6379"
fi

until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
  echo "Waiting for Redis..."
  sleep 2
done

# Check mysql_upgrade (master and slave)
CONTAINER_ID=
until [[ ! -z "${CONTAINER_ID}" ]] && [[ "${CONTAINER_ID}" =~ ^[[:alnum:]]*$ ]]; do
  CONTAINER_ID=$(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"mysql-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
  sleep 2
done
echo "MySQL @ ${CONTAINER_ID}"
SQL_LOOP_C=0
SQL_CHANGED=0
until [[ ${SQL_UPGRADE_STATUS} == 'success' ]]; do
  if [ ${SQL_LOOP_C} -gt 4 ]; then
    echo "Tried to upgrade MySQL and failed, giving up after ${SQL_LOOP_C} retries and starting container (oops, not good)"
    break
  fi
  SQL_FULL_UPGRADE_RETURN=$(curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_upgrade"}' --silent -H 'Content-type: application/json')
  SQL_UPGRADE_STATUS=$(echo ${SQL_FULL_UPGRADE_RETURN} | jq -r .type)
  SQL_LOOP_C=$((SQL_LOOP_C+1))
  echo "SQL upgrade iteration #${SQL_LOOP_C}"
  if [[ ${SQL_UPGRADE_STATUS} == 'warning' ]]; then
    SQL_CHANGED=1
    echo "MySQL applied an upgrade, debug output:"
    echo ${SQL_FULL_UPGRADE_RETURN}
    sleep 3
    while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
      echo "Waiting for SQL to return, please wait"
      sleep 2
    done
    continue
  elif [[ ${SQL_UPGRADE_STATUS} == 'success' ]]; then
    echo "MySQL is up-to-date - debug output:"
    echo ${SQL_FULL_UPGRADE_RETURN}
  else
    echo "No valid reponse for mysql_upgrade was received, debug output:"
    echo ${SQL_FULL_UPGRADE_RETURN}
  fi
done

# doing post-installation stuff, if SQL was upgraded (master and slave)
if [ ${SQL_CHANGED} -eq 1 ]; then
  POSTFIX=$(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
  if [[ -z "${POSTFIX}" ]] || ! [[ "${POSTFIX}" =~ ^[[:alnum:]]*$ ]]; then
    echo "Could not determine Postfix container ID, skipping Postfix restart."
  else
    echo "Restarting Postfix"
    curl -X POST --silent --insecure https://dockerapi/containers/${POSTFIX}/restart | jq -r '.msg'
    echo "Sleeping 5 seconds..."
    sleep 5
  fi
fi

# Check mysql tz import (master and slave)
TZ_CHECK=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT CONVERT_TZ('2019-11-02 23:33:00','Europe/Berlin','UTC') AS time;" -BN 2> /dev/null)
if [[ -z ${TZ_CHECK} ]] || [[ "${TZ_CHECK}" == "NULL" ]]; then
  SQL_FULL_TZINFO_IMPORT_RETURN=$(curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_tzinfo_to_sql"}' --silent -H 'Content-type: application/json')
  echo "MySQL mysql_tzinfo_to_sql - debug output:"
  echo ${SQL_FULL_TZINFO_IMPORT_RETURN}
fi

if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  echo "We are master, preparing..."
  # Set a default release format
  if [[ -z $(${REDIS_CMDLINE} --raw GET Q_RELEASE_FORMAT) ]]; then
    ${REDIS_CMDLINE} --raw SET Q_RELEASE_FORMAT raw
  fi

  # Set max age of q items - if unset
  if [[ -z $(${REDIS_CMDLINE} --raw GET Q_MAX_AGE) ]]; then
    ${REDIS_CMDLINE} --raw SET Q_MAX_AGE 365
  fi

  # Set default password policy - if unset
  if [[ -z $(${REDIS_CMDLINE} --raw HGET PASSWD_POLICY length) ]]; then
    ${REDIS_CMDLINE} --raw HSET PASSWD_POLICY length 6
    ${REDIS_CMDLINE} --raw HSET PASSWD_POLICY chars 0
    ${REDIS_CMDLINE} --raw HSET PASSWD_POLICY special_chars 0
    ${REDIS_CMDLINE} --raw HSET PASSWD_POLICY lowerupper 0
    ${REDIS_CMDLINE} --raw HSET PASSWD_POLICY numbers 0
  fi

  # Trigger db init
  echo "Running DB init..."
  php -c /usr/local/etc/php -f /web/inc/init_db.inc.php

  # Recreating domain map
  echo "Rebuilding domain map in Redis..."
  declare -a DOMAIN_ARR
  ${REDIS_CMDLINE} DEL DOMAIN_MAP > /dev/null
  while read line
  do
    DOMAIN_ARR+=("$line")
  done < <(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain FROM domain" -Bs)
  while read line
  do
    DOMAIN_ARR+=("$line")
  done < <(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT alias_domain FROM alias_domain" -Bs)

  if [[ ! -z ${DOMAIN_ARR} ]]; then
    for domain in "${DOMAIN_ARR[@]}"; do
      ${REDIS_CMDLINE} HSET DOMAIN_MAP ${domain} 1 > /dev/null
    done
  fi

  # Set API options if env vars are not empty
  if [[ ${API_ALLOW_FROM} != "invalid" ]] && [[ ! -z ${API_ALLOW_FROM} ]]; then
    IFS=',' read -r -a API_ALLOW_FROM_ARR <<< "${API_ALLOW_FROM}"
    declare -a VALIDATED_API_ALLOW_FROM_ARR
    REGEX_IP6='^([0-9a-fA-F]{0,4}:){1,7}[0-9a-fA-F]{0,4}(/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$'
    REGEX_IP4='^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+(/([0-9]|[1-2][0-9]|3[0-2]))?$'
    for IP in "${API_ALLOW_FROM_ARR[@]}"; do
      if [[ ${IP} =~ ${REGEX_IP6} ]] || [[ ${IP} =~ ${REGEX_IP4} ]]; then
        VALIDATED_API_ALLOW_FROM_ARR+=("${IP}")
      fi
    done
    VALIDATED_IPS=$(array_by_comma ${VALIDATED_API_ALLOW_FROM_ARR[*]})
    if [[ ! -z ${VALIDATED_IPS} ]]; then
      if [[ ${API_KEY} != "invalid" ]] && [[ ! -z ${API_KEY} ]]; then
        mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
DELETE FROM api WHERE access = 'rw';
INSERT INTO api (api_key, active, allow_from, access) VALUES ("${API_KEY}", "1", "${VALIDATED_IPS}", "rw");
EOF
      fi
      if [[ ${API_KEY_READ_ONLY} != "invalid" ]] && [[ ! -z ${API_KEY_READ_ONLY} ]]; then
        mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
DELETE FROM api WHERE access = 'ro';
INSERT INTO api (api_key, active, allow_from, access) VALUES ("${API_KEY_READ_ONLY}", "1", "${VALIDATED_IPS}", "ro");
EOF
      fi
    fi
  fi

  # Create events (master only, STATUS for event on slave will be SLAVESIDE_DISABLED)
  mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
DROP EVENT IF EXISTS clean_spamalias;
DELIMITER //
CREATE EVENT clean_spamalias
ON SCHEDULE EVERY 1 DAY DO
BEGIN
  DELETE FROM spamalias WHERE validity < UNIX_TIMESTAMP();
END;
//
DELIMITER ;
DROP EVENT IF EXISTS clean_oauth2;
DELIMITER //
CREATE EVENT clean_oauth2
ON SCHEDULE EVERY 1 DAY DO
BEGIN
  DELETE FROM oauth_refresh_tokens WHERE expires < NOW();
  DELETE FROM oauth_access_tokens WHERE expires < NOW();
  DELETE FROM oauth_authorization_codes WHERE expires < NOW();
END;
//
DELIMITER ;
DROP EVENT IF EXISTS clean_sasl_log;
DELIMITER //
CREATE EVENT clean_sasl_log
ON SCHEDULE EVERY 1 DAY DO
BEGIN
  DELETE sasl_log.* FROM sasl_log
    LEFT JOIN (
      SELECT username, service, MAX(datetime) AS lastdate
      FROM sasl_log
      GROUP BY username, service
    ) AS last ON sasl_log.username = last.username AND sasl_log.service = last.service
  WHERE datetime < DATE_SUB(NOW(), INTERVAL 31 DAY) AND datetime < lastdate;
  DELETE FROM sasl_log
  WHERE username NOT IN (SELECT username FROM mailbox) AND
    datetime < DATE_SUB(NOW(), INTERVAL 31 DAY);
END;
//
DELIMITER ;
EOF
fi

# Create dummy for custom overrides of mailcow style
[[ ! -f /web/css/build/0081-custom-mailcow.css ]] && echo '/* Autogenerated by mailcow */' > /web/css/build/0081-custom-mailcow.css

# Fix permissions for global filters
chown -R 82:82 /global_sieve/*

# Fix permissions on twig cache folder
chown -R 82:82 /web/templates/cache
# Clear cache
find /web/templates/cache/* -not -name '.gitkeep' -delete

# Run hooks
for file in /hooks/*; do
  if [ -x "${file}" ]; then
@@ -212,4 +8,13 @@ for file in /hooks/*; do
  fi
done

python3 -u /bootstrap/main.py
BOOTSTRAP_EXIT_CODE=$?

if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
  echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting PHP-FPM."
  exit $BOOTSTRAP_EXIT_CODE
fi

echo "Bootstrap succeeded. Starting PHP-FPM..."
exec "$@"
@@ -1,5 +1,6 @@
FROM debian:bullseye-slim
LABEL maintainer "The Infrastructure Company GmbH GmbH <info@servercow.de>"
FROM debian:bookworm-slim

LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"

ARG DEBIAN_FRONTEND=noninteractive
ENV LC_ALL C
@@ -33,23 +34,31 @@ RUN groupadd -g 102 postfix \
    syslog-ng-core \
    syslog-ng-mod-redis \
    tzdata \
    python3 python3-pip \
    && rm -rf /var/lib/apt/lists/* \
    && touch /etc/default/locale \
    && printf '#!/bin/bash\n/usr/sbin/postconf -c /opt/postfix/conf "$@"' > /usr/local/sbin/postconf \
    && chmod +x /usr/local/sbin/postconf

COPY supervisord.conf /etc/supervisor/supervisord.conf
COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
COPY postfix.sh /opt/postfix.sh
COPY rspamd-pipe-ham /usr/local/bin/rspamd-pipe-ham
COPY rspamd-pipe-spam /usr/local/bin/rspamd-pipe-spam
COPY whitelist_forwardinghosts.sh /usr/local/bin/whitelist_forwardinghosts.sh
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
COPY docker-entrypoint.sh /docker-entrypoint.sh
RUN pip install --break-system-packages \
    mysql-connector-python \
    jinja2 \
    redis \
    dnspython \
    psutil

RUN chmod +x /opt/postfix.sh \
    /usr/local/bin/rspamd-pipe-ham \
COPY data/Dockerfiles/bootstrap /bootstrap
COPY data/Dockerfiles/postfix/supervisord.conf /etc/supervisor/supervisord.conf
COPY data/Dockerfiles/postfix/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
COPY data/Dockerfiles/postfix/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
COPY data/Dockerfiles/postfix/rspamd-pipe-ham /usr/local/bin/rspamd-pipe-ham
COPY data/Dockerfiles/postfix/rspamd-pipe-spam /usr/local/bin/rspamd-pipe-spam
COPY data/Dockerfiles/postfix/whitelist_forwardinghosts.sh /usr/local/bin/whitelist_forwardinghosts.sh
COPY data/Dockerfiles/postfix/stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
COPY data/Dockerfiles/postfix/docker-entrypoint.sh /docker-entrypoint.sh

RUN chmod +x /usr/local/bin/rspamd-pipe-ham \
    /docker-entrypoint.sh \
    /usr/local/bin/rspamd-pipe-spam \
    /usr/local/bin/whitelist_forwardinghosts.sh \
    /usr/local/sbin/stop-supervisor.sh
@@ -57,6 +66,5 @@ RUN rm -rf /tmp/* /var/tmp/*

EXPOSE 588

ENTRYPOINT ["/docker-entrypoint.sh"]

CMD exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
@@ -8,8 +8,33 @@ for file in /hooks/*; do
  fi
done

if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
python3 -u /bootstrap/main.py
BOOTSTRAP_EXIT_CODE=$?

if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
  echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting Postfix."
  exit $BOOTSTRAP_EXIT_CODE
fi

exec "$@"
# Fix OpenSSL 3.X TLS1.0, 1.1 support (https://community.mailcow.email/d/4062-hi-all/20)
if grep -qE '\!SSLv2|\!SSLv3|>=TLSv1(\.[0-1])?$' /opt/postfix/conf/main.cf /opt/postfix/conf/extra.cf; then
  sed -i '/\[openssl_init\]/a ssl_conf = ssl_configuration' /etc/ssl/openssl.cnf

  echo "[ssl_configuration]" >> /etc/ssl/openssl.cnf
  echo "system_default = tls_system_default" >> /etc/ssl/openssl.cnf
  echo "[tls_system_default]" >> /etc/ssl/openssl.cnf
  echo "MinProtocol = TLSv1" >> /etc/ssl/openssl.cnf
  echo "CipherString = DEFAULT@SECLEVEL=0" >> /etc/ssl/openssl.cnf
fi

# Start Postfix
postconf -c /opt/postfix/conf > /dev/null
if [[ $? != 0 ]]; then
  echo "Postfix configuration error, refusing to start."
  exit 1
else
  echo "Bootstrap succeeded. Starting Postfix..."
  postfix -c /opt/postfix/conf start
  sleep 126144000
fi
@@ -1,525 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
trap "postfix stop" EXIT
|
||||
|
||||
[[ ! -d /opt/postfix/conf/sql/ ]] && mkdir -p /opt/postfix/conf/sql/
|
||||
|
||||
# Wait for MySQL to warm-up
|
||||
while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
echo "Waiting for database to come up..."
|
||||
sleep 2
|
||||
done
|
||||
|
||||
until dig +short mailcow.email > /dev/null; do
|
||||
echo "Waiting for DNS..."
|
||||
sleep 1
|
||||
done
|
||||
|
||||
cat <<EOF > /etc/aliases
|
||||
# Autogenerated by mailcow
|
||||
null: /dev/null
|
||||
watchdog: /dev/null
|
||||
ham: "|/usr/local/bin/rspamd-pipe-ham"
|
||||
spam: "|/usr/local/bin/rspamd-pipe-spam"
|
||||
EOF
|
||||
newaliases;
|
||||
|
||||
# create sni configuration
|
||||
if [[ "${SKIP_LETS_ENCRYPT}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
echo -n "" > /opt/postfix/conf/sni.map
|
||||
else
|
||||
echo -n "" > /opt/postfix/conf/sni.map;
|
||||
for cert_dir in /etc/ssl/mail/*/ ; do
|
||||
if [[ ! -f ${cert_dir}domains ]] || [[ ! -f ${cert_dir}cert.pem ]] || [[ ! -f ${cert_dir}key.pem ]]; then
|
||||
continue;
|
||||
fi
|
||||
IFS=" " read -r -a domains <<< "$(cat "${cert_dir}domains")"
|
||||
for domain in "${domains[@]}"; do
|
||||
echo -n "${domain} ${cert_dir}key.pem ${cert_dir}cert.pem" >> /opt/postfix/conf/sni.map;
|
||||
echo "" >> /opt/postfix/conf/sni.map;
|
||||
done
|
||||
done
|
||||
fi
|
||||
postmap -F hash:/opt/postfix/conf/sni.map;
|
||||
|
||||
cat <<EOF > /opt/postfix/conf/sql/mysql_relay_ne.cf
|
||||
# Autogenerated by mailcow
|
||||
user = ${DBUSER}
|
||||
password = ${DBPASS}
|
||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
||||
dbname = ${DBNAME}
|
||||
query = SELECT IF(EXISTS(SELECT address, domain FROM alias
|
||||
WHERE address = '%s'
|
||||
AND domain IN (
|
||||
SELECT domain FROM domain
|
||||
WHERE backupmx = '1'
|
||||
AND relay_all_recipients = '1'
|
||||
AND relay_unknown_only = '1')
|
||||
|
||||
), 'lmtp:inet:dovecot:24', NULL) AS 'transport'
|
||||
EOF
|
||||
|
||||
cat <<EOF > /opt/postfix/conf/sql/mysql_relay_recipient_maps.cf
|
||||
# Autogenerated by mailcow
|
||||
user = ${DBUSER}
|
||||
password = ${DBPASS}
|
||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
||||
dbname = ${DBNAME}
|
||||
query = SELECT DISTINCT
|
||||
CASE WHEN '%d' IN (
|
||||
SELECT domain FROM domain
|
||||
WHERE relay_all_recipients=1
|
||||
AND domain='%d'
|
||||
AND backupmx=1
|
||||
)
|
||||
THEN '%s' ELSE (
|
||||
SELECT goto FROM alias WHERE address='%s' AND active='1'
|
||||
)
|
||||
END AS result;
|
||||
EOF
|
||||
|
||||
cat <<EOF > /opt/postfix/conf/sql/mysql_tls_policy_override_maps.cf
|
||||
# Autogenerated by mailcow
|
||||
user = ${DBUSER}
|
||||
password = ${DBPASS}
|
||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
||||
dbname = ${DBNAME}
|
||||
query = SELECT CONCAT(policy, ' ', parameters) AS tls_policy FROM tls_policy_override WHERE active = '1' AND dest = '%s'
|
||||
EOF
|
||||
|
||||
cat <<EOF > /opt/postfix/conf/sql/mysql_tls_enforce_in_policy.cf
|
||||
# Autogenerated by mailcow
|
||||
user = ${DBUSER}
|
||||
password = ${DBPASS}
|
||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
||||
dbname = ${DBNAME}
|
||||
query = SELECT IF(EXISTS(
|
||||
SELECT 'TLS_ACTIVE' FROM alias
|
||||
LEFT OUTER JOIN mailbox ON mailbox.username = alias.goto
|
||||
WHERE (address='%s'
|
||||
OR address IN (
|
||||
SELECT CONCAT('%u', '@', target_domain) FROM alias_domain
|
||||
WHERE alias_domain='%d'
|
||||
)
|
||||
) AND JSON_UNQUOTE(JSON_VALUE(attributes, '$.tls_enforce_in')) = '1' AND mailbox.active = '1'
|
||||
), 'reject_plaintext_session', NULL) AS 'tls_enforce_in';
|
||||
EOF
|
||||
|
||||
cat <<EOF > /opt/postfix/conf/sql/mysql_sender_dependent_default_transport_maps.cf
|
||||
# Autogenerated by mailcow
|
||||
user = ${DBUSER}
|
||||
password = ${DBPASS}
|
||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
||||
dbname = ${DBNAME}
|
||||
query = SELECT GROUP_CONCAT(transport SEPARATOR '') AS transport_maps
|
||||
FROM (
|
||||
SELECT IF(EXISTS(SELECT 'smtp_type' FROM alias
|
||||
LEFT OUTER JOIN mailbox ON mailbox.username = alias.goto
|
||||
WHERE (address = '%s'
|
||||
OR address IN (
|
||||
SELECT CONCAT('%u', '@', target_domain) FROM alias_domain
|
||||
WHERE alias_domain = '%d'
|
||||
)
|
||||
)
|
||||
AND JSON_UNQUOTE(JSON_VALUE(attributes, '$.tls_enforce_out')) = '1'
|
||||
AND mailbox.active = '1'
|
||||
), 'smtp_enforced_tls:', 'smtp:') AS 'transport'
|
||||
UNION ALL
|
||||
SELECT COALESCE(
|
||||
(SELECT hostname FROM relayhosts
|
||||
LEFT OUTER JOIN mailbox ON JSON_UNQUOTE(JSON_VALUE(mailbox.attributes, '$.relayhost')) = relayhosts.id
|
||||
WHERE relayhosts.active = '1'
|
||||
AND (
|
||||
mailbox.username IN (SELECT alias.goto from alias
|
||||
JOIN mailbox ON mailbox.username = alias.goto
|
||||
WHERE alias.active = '1'
|
||||
AND alias.address = '%s'
|
||||
AND alias.address NOT LIKE '@%%'
|
||||
)
|
||||
)
|
||||
),
|
||||
(SELECT hostname FROM relayhosts
|
||||
LEFT OUTER JOIN domain ON domain.relayhost = relayhosts.id
|
||||
WHERE relayhosts.active = '1'
|
||||
AND (domain.domain = '%d'
|
||||
OR domain.domain IN (
|
||||
SELECT target_domain FROM alias_domain
|
||||
WHERE alias_domain = '%d'
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
) AS transport_view;
|
||||
EOF
|
||||
|
||||
cat <<EOF > /opt/postfix/conf/sql/mysql_transport_maps.cf
|
||||
# Autogenerated by mailcow
|
||||
user = ${DBUSER}
|
||||
password = ${DBPASS}
|
||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
||||
dbname = ${DBNAME}
|
||||
query = SELECT CONCAT('smtp_via_transport_maps:', nexthop) AS transport FROM transports
|
||||
WHERE active = '1'
|
||||
AND destination = '%s';
|
||||
EOF
|
||||
|
||||
cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_resource_maps.cf
|
||||
# Autogenerated by mailcow
|
||||
user = ${DBUSER}
|
||||
password = ${DBPASS}
|
||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
||||
dbname = ${DBNAME}
|
||||
query = SELECT 'null@localhost' FROM mailbox
|
||||
WHERE kind REGEXP 'location|thing|group' AND username = '%s';
|
||||
EOF
|
||||
|
||||
cat <<EOF > /opt/postfix/conf/sql/mysql_sasl_passwd_maps_sender_dependent.cf
|
||||
# Autogenerated by mailcow
|
||||
user = ${DBUSER}
|
||||
password = ${DBPASS}
|
||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
||||
dbname = ${DBNAME}
|
||||
query = SELECT CONCAT_WS(':', username, password) AS auth_data FROM relayhosts
|
||||
WHERE id IN (
|
||||
SELECT COALESCE(
|
||||
(SELECT id FROM relayhosts
|
||||
LEFT OUTER JOIN domain ON domain.relayhost = relayhosts.id
|
||||
WHERE relayhosts.active = '1'
|
||||
AND (domain.domain = '%d'
|
||||
OR domain.domain IN (
|
||||
SELECT target_domain FROM alias_domain
|
||||
WHERE alias_domain = '%d'
|
||||
)
|
||||
)
|
||||
),
|
||||
(SELECT id FROM relayhosts
|
||||
LEFT OUTER JOIN mailbox ON JSON_UNQUOTE(JSON_VALUE(mailbox.attributes, '$.relayhost')) = relayhosts.id
|
||||
WHERE relayhosts.active = '1'
|
||||
AND (
|
||||
mailbox.username IN (
|
||||
SELECT alias.goto from alias
|
||||
JOIN mailbox ON mailbox.username = alias.goto
|
||||
WHERE alias.active = '1'
|
||||
AND alias.address = '%s'
|
||||
AND alias.address NOT LIKE '@%%'
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
AND active = '1'
|
||||
AND username != '';
|
||||
EOF
|
||||
|
||||
cat <<EOF > /opt/postfix/conf/sql/mysql_sasl_passwd_maps_transport_maps.cf
|
||||
# Autogenerated by mailcow
|
||||
user = ${DBUSER}
|
||||
password = ${DBPASS}
|
||||
hosts = unix:/var/run/mysqld/mysqld.sock
|
||||
dbname = ${DBNAME}
|
||||
query = SELECT CONCAT_WS(':', username, password) AS auth_data FROM transports
|
||||
WHERE nexthop = '%s'
|
||||
AND active = '1'
|
||||
AND username != ''
|
||||
LIMIT 1;
|
||||
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_alias_domain_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT username FROM mailbox, alias_domain
  WHERE alias_domain.alias_domain = '%d'
  AND mailbox.username = CONCAT('%u', '@', alias_domain.target_domain)
  AND (mailbox.active = '1' OR mailbox.active = '2')
  AND alias_domain.active='1'
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_alias_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT goto FROM alias
  WHERE address='%s'
  AND (active='1' OR active='2');
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_recipient_bcc_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT bcc_dest FROM bcc_maps
  WHERE local_dest='%s'
  AND type='rcpt'
  AND active='1';
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_sender_bcc_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT bcc_dest FROM bcc_maps
  WHERE local_dest='%s'
  AND type='sender'
  AND active='1';
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_recipient_canonical_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT new_dest FROM recipient_maps
  WHERE old_dest='%s'
  AND active='1';
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_domains_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT alias_domain FROM alias_domain WHERE alias_domain='%s' AND active='1'
  UNION
  SELECT domain FROM domain
    WHERE domain='%s'
    AND active = '1'
    AND backupmx = '0'
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_mailbox_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT CONCAT(JSON_UNQUOTE(JSON_VALUE(attributes, '$.mailbox_format')), mailbox_path_prefix, '%d/%u/') FROM mailbox WHERE username='%s' AND (active = '1' OR active = '2')
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_relay_domain_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT domain FROM domain WHERE domain='%s' AND backupmx = '1' AND active = '1'
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_sender_acl.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
# First select queries domain and alias_domain to determine if domains are active.
query = SELECT goto FROM alias
  WHERE id IN (
    SELECT COALESCE (
      (
        SELECT id FROM alias
          WHERE address='%s'
          AND (active='1' OR active='2')
      ), (
        SELECT id FROM alias
          WHERE address='@%d'
          AND (active='1' OR active='2')
      )
    )
  )
  AND active='1'
  AND (domain IN
    (SELECT domain FROM domain
      WHERE domain='%d'
      AND active='1')
    OR domain IN (
      SELECT alias_domain FROM alias_domain
        WHERE alias_domain='%d'
        AND active='1'
    )
  )
  UNION
  SELECT logged_in_as FROM sender_acl
    WHERE send_as='@%d'
    OR send_as='%s'
    OR send_as='*'
    OR send_as IN (
      SELECT CONCAT('@',target_domain) FROM alias_domain
        WHERE alias_domain = '%d')
    OR send_as IN (
      SELECT CONCAT('%u','@',target_domain) FROM alias_domain
        WHERE alias_domain = '%d')
    AND logged_in_as NOT IN (
      SELECT goto FROM alias
        WHERE address='%s')
  UNION
  SELECT username FROM mailbox, alias_domain
    WHERE alias_domain.alias_domain = '%d'
    AND mailbox.username = CONCAT('%u','@',alias_domain.target_domain)
    AND (mailbox.active = '1' OR mailbox.active = '2')
    AND alias_domain.active='1';
EOF

# MX based routing
cat <<EOF > /opt/postfix/conf/sql/mysql_mbr_access_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT CONCAT('FILTER smtp_via_transport_maps:', nexthop) AS transport FROM transports
  WHERE '%s' REGEXP destination
  AND active='1'
  AND is_mx_based='1';
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_spamalias_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT goto FROM spamalias
  WHERE address='%s'
  AND validity >= UNIX_TIMESTAMP()
EOF

if [ ! -f /opt/postfix/conf/dns_blocklists.cf ]; then
  cat <<EOF > /opt/postfix/conf/dns_blocklists.cf
# This file can be edited.
# Delete this file and restart the postfix container to revert any changes.
postscreen_dnsbl_sites = wl.mailspike.net=127.0.0.[18;19;20]*-2
  hostkarma.junkemailfilter.com=127.0.0.1*-2
  list.dnswl.org=127.0.[0..255].0*-2
  list.dnswl.org=127.0.[0..255].1*-4
  list.dnswl.org=127.0.[0..255].2*-6
  list.dnswl.org=127.0.[0..255].3*-8
  ix.dnsbl.manitu.net*2
  bl.spamcop.net*2
  bl.suomispam.net*2
  hostkarma.junkemailfilter.com=127.0.0.2*3
  hostkarma.junkemailfilter.com=127.0.0.4*2
  hostkarma.junkemailfilter.com=127.0.1.2*1
  backscatter.spameatingmonkey.net*2
  bl.ipv6.spameatingmonkey.net*2
  bl.spameatingmonkey.net*2
  b.barracudacentral.org=127.0.0.2*7
  bl.mailspike.net=127.0.0.2*5
  bl.mailspike.net=127.0.0.[10;11;12]*4
  dnsbl.sorbs.net=127.0.0.10*8
  dnsbl.sorbs.net=127.0.0.5*6
  dnsbl.sorbs.net=127.0.0.7*3
  dnsbl.sorbs.net=127.0.0.8*2
  dnsbl.sorbs.net=127.0.0.6*2
  dnsbl.sorbs.net=127.0.0.9*2
EOF
fi
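Each postscreen_dnsbl_sites entry above carries a weight after the asterisk: positive for blocklists, negative for allowlists. postscreen sums the weights of every list that answers for a connecting client and compares the total against postscreen_dnsbl_threshold; reaching the threshold triggers postscreen_dnsbl_action. A short sketch of how to inspect the effective values (standard Postfix parameters; the sample arithmetic is illustrative only):

# Run inside the Postfix container to see the active threshold and action:
postconf -c /opt/postfix/conf postscreen_dnsbl_threshold postscreen_dnsbl_action
# Example: a client listed on bl.spamcop.net (*2) and dnsbl.sorbs.net=127.0.0.5 (*6)
# scores 2 + 6 = 8, while a list.dnswl.org high-trust hit (*-8) alone would cancel that out.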
DNSBL_CONFIG=$(grep -v '^#' /opt/postfix/conf/dns_blocklists.cf | grep '\S')

if [ ! -z "$DNSBL_CONFIG" ]; then
  echo -e "\e[33mChecking if the ASN for your IP is listed on the Spamhaus Bad ASN List...\e[0m"
  if [ -n "$SPAMHAUS_DQS_KEY" ]; then
    echo -e "\e[32mDetected SPAMHAUS_DQS_KEY variable from mailcow.conf...\e[0m"
    echo -e "\e[33mUsing DQS blocklists from Spamhaus!\e[0m"
    SPAMHAUS_DNSBL_CONFIG=$(cat <<EOF
  ${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.[4..7]*6
  ${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.[10;11]*8
  ${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.3*4
  ${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.2*3
postscreen_dnsbl_reply_map = texthash:/opt/postfix/conf/dnsbl_reply.map
EOF

cat <<EOF > /opt/postfix/conf/dnsbl_reply.map
# Autogenerated by mailcow, using Spamhaus DQS reply domains
${SPAMHAUS_DQS_KEY}.sbl.dq.spamhaus.net sbl.spamhaus.org
${SPAMHAUS_DQS_KEY}.xbl.dq.spamhaus.net xbl.spamhaus.org
${SPAMHAUS_DQS_KEY}.pbl.dq.spamhaus.net pbl.spamhaus.org
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net zen.spamhaus.org
${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net dbl.spamhaus.org
${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net zrd.spamhaus.org
EOF
)
  else
    if [ -f "/opt/postfix/conf/dnsbl_reply.map" ]; then
      rm /opt/postfix/conf/dnsbl_reply.map
    fi
    response=$(curl --connect-timeout 15 --max-time 30 -s -o /dev/null -w "%{http_code}" "https://asn-check.mailcow.email")
    if [ "$response" -eq 503 ]; then
      echo -e "\e[31mThe AS of your IP is listed as a banned AS by Spamhaus!\e[0m"
      echo -e "\e[33mNo SPAMHAUS_DQS_KEY found... Skipping Spamhaus blocklists entirely!\e[0m"
      SPAMHAUS_DNSBL_CONFIG=""
    elif [ "$response" -eq 200 ]; then
      echo -e "\e[32mThe AS of your IP is NOT listed as a banned AS by Spamhaus!\e[0m"
      echo -e "\e[33mUsing the open Spamhaus blocklists.\e[0m"
      SPAMHAUS_DNSBL_CONFIG=$(cat <<EOF
  zen.spamhaus.org=127.0.0.[10;11]*8
  zen.spamhaus.org=127.0.0.[4..7]*6
  zen.spamhaus.org=127.0.0.3*4
  zen.spamhaus.org=127.0.0.2*3
EOF
)

    else
      echo -e "\e[31mWe couldn't determine your AS... (maybe a DNS/network issue?) Response code: $response\e[0m"
      echo -e "\e[33mDeactivating Spamhaus DNS blocklists to be on the safe side!\e[0m"
      SPAMHAUS_DNSBL_CONFIG=""
    fi
  fi
fi

# Reset main.cf
sed -i '/Overrides/q' /opt/postfix/conf/main.cf
echo >> /opt/postfix/conf/main.cf
# Append postscreen dnsbl sites to main.cf
if [ ! -z "$DNSBL_CONFIG" ]; then
  echo -e "${DNSBL_CONFIG}\n${SPAMHAUS_DNSBL_CONFIG}" >> /opt/postfix/conf/main.cf
fi
# Append user overrides
echo -e "\n# User Overrides" >> /opt/postfix/conf/main.cf
touch /opt/postfix/conf/extra.cf
sed -i '/\$myhostname/! { /myhostname/d }' /opt/postfix/conf/extra.cf
echo -e "myhostname = ${MAILCOW_HOSTNAME}\n$(cat /opt/postfix/conf/extra.cf)" > /opt/postfix/conf/extra.cf
cat /opt/postfix/conf/extra.cf >> /opt/postfix/conf/main.cf
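extra.cf is the supported place for user overrides: its contents are appended below the "# User Overrides" marker on every container start, and the sed call above drops stale myhostname lines unless they merely reference $myhostname. A hypothetical override (the parameter value is an example, not a mailcow default):

cat <<'EXTRA' >> /opt/postfix/conf/extra.cf
smtpd_banner = $myhostname ESMTP
EXTRA
# This line survives the myhostname cleanup because it contains the literal
# string $myhostname rather than setting myhostname itself.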

if [ ! -f /opt/postfix/conf/custom_transport.pcre ]; then
  echo "Creating dummy custom_transport.pcre"
  touch /opt/postfix/conf/custom_transport.pcre
fi
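custom_transport.pcre is consulted by Postfix as a pcre: transport map: each line pairs a regular expression on the recipient address with a transport:nexthop result. A hypothetical entry (domain and relay are placeholders, not mailcow defaults):

cat <<'PCRE' >> /opt/postfix/conf/custom_transport.pcre
# Route all mail for example.org through a specific relay (illustrative only):
/^.+@example\.org$/ smtp:[relay.example.org]:587
PCRE
# Entries take effect after a reload of Postfix inside the container.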

if [[ ! -f /opt/postfix/conf/custom_postscreen_whitelist.cidr ]]; then
  echo "Creating dummy custom_postscreen_whitelist.cidr"
  cat <<EOF > /opt/postfix/conf/custom_postscreen_whitelist.cidr
# Autogenerated by mailcow
# Rules are evaluated in the order specified.
# Blacklist 192.168.* except 192.168.0.1:
# 192.168.0.1 permit
# 192.168.0.0/16 reject
EOF
fi

# Fix Postfix permissions
chown -R root:postfix /opt/postfix/conf/sql/ /opt/postfix/conf/custom_transport.pcre
chmod 640 /opt/postfix/conf/sql/*.cf /opt/postfix/conf/custom_transport.pcre
chgrp -R postdrop /var/spool/postfix/public
chgrp -R postdrop /var/spool/postfix/maildrop
postfix set-permissions

# Check Postfix configuration
postconf -c /opt/postfix/conf > /dev/null

if [[ $? != 0 ]]; then
  echo "Postfix configuration error, refusing to start."
  exit 1
else
  postfix -c /opt/postfix/conf start
  sleep 126144000
fi
@@ -11,13 +11,14 @@ stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autostart=true

[program:postfix]
command=/opt/postfix.sh
[program:bootstrap]
command=/docker-entrypoint.sh
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=true
startsecs=10

[eventlistener:processes]
command=/usr/local/sbin/stop-supervisor.sh

@@ -1,4 +1,4 @@
@version: 3.28
@version: 3.38
@include "scl.conf"
options {
  chain_hostnames(off);
@@ -20,6 +20,7 @@ destination d_redis_ui_log {
  host("`REDIS_SLAVEOF_IP`")
  persist-name("redis1")
  port(`REDIS_SLAVEOF_PORT`)
  auth("`REDISPASS`")
  command("LPUSH" "POSTFIX_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
  );
};
@@ -28,6 +29,7 @@ destination d_redis_f2b_channel {
  host("`REDIS_SLAVEOF_IP`")
  persist-name("redis2")
  port(`REDIS_SLAVEOF_PORT`)
  auth("`REDISPASS`")
  command("PUBLISH" "F2B_CHANNEL" "$(sanitize $MESSAGE)")
  );
};

@@ -1,4 +1,4 @@
@version: 3.28
@version: 3.38
@include "scl.conf"
options {
  chain_hostnames(off);
@@ -20,6 +20,7 @@ destination d_redis_ui_log {
  host("redis-mailcow")
  persist-name("redis1")
  port(6379)
  auth("`REDISPASS`")
  command("LPUSH" "POSTFIX_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
  );
};
@@ -28,6 +29,7 @@ destination d_redis_f2b_channel {
  host("redis-mailcow")
  persist-name("redis2")
  port(6379)
  auth("`REDISPASS`")
  command("PUBLISH" "F2B_CHANNEL" "$(sanitize $MESSAGE)")
  );
};

@@ -1,22 +1,28 @@
FROM debian:bullseye-slim
LABEL maintainer "The Infrastructure Company GmbH GmbH <info@servercow.de>"
FROM debian:bookworm-slim
LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"

ARG DEBIAN_FRONTEND=noninteractive
ARG CODENAME=bullseye
ENV LC_ALL C
ARG RSPAMD_VER=rspamd_3.11.1-1~ab0b44951
ARG CODENAME=bookworm
ENV LC_ALL=C

RUN apt-get update && apt-get install -y \
RUN apt-get update && apt-get install -y --no-install-recommends \
  tzdata \
  ca-certificates \
  gnupg2 \
  apt-transport-https \
  dnsutils \
  netcat \
  && apt-key adv --fetch-keys https://rspamd.com/apt-stable/gpg.key \
  && echo "deb https://rspamd.com/apt-stable/ $CODENAME main" > /etc/apt/sources.list.d/rspamd.list \
  && apt-get update \
  && apt-get --no-install-recommends -y install rspamd redis-tools procps nano \
  && rm -rf /var/lib/apt/lists/* \
  netcat-traditional \
  wget \
  redis-tools \
  procps \
  nano \
  lua-cjson \
  python3 python3-pip \
  && arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) \
  && wget -P /tmp https://rspamd.com/apt-stable/pool/main/r/rspamd/${RSPAMD_VER}~${CODENAME}_${arch}.deb \
  && apt install -y /tmp/${RSPAMD_VER}~${CODENAME}_${arch}.deb \
  && rm -rf /var/lib/apt/lists/* /tmp/* \
  && apt-get autoremove --purge \
  && apt-get clean \
  && mkdir -p /run/rspamd \
@@ -24,13 +30,20 @@ RUN apt-get update && apt-get install -y \
  && echo 'alias ll="ls -la --color"' >> ~/.bashrc \
  && sed -i 's/#analysis_keyword_table > 0/analysis_cat_table.macro_exist == "M"/g' /usr/share/rspamd/lualib/lua_scanners/oletools.lua

COPY settings.conf /etc/rspamd/settings.conf
COPY metadata_exporter.lua /usr/share/rspamd/plugins/metadata_exporter.lua
COPY set_worker_password.sh /set_worker_password.sh
COPY docker-entrypoint.sh /docker-entrypoint.sh
RUN pip install --break-system-packages \
  mysql-connector-python \
  jinja2 \
  redis \
  dnspython \
  psutil


COPY data/Dockerfiles/bootstrap /bootstrap
COPY data/Dockerfiles/rspamd/settings.conf /etc/rspamd/settings.conf
COPY data/Dockerfiles/rspamd/set_worker_password.sh /set_worker_password.sh
COPY data/Dockerfiles/rspamd/docker-entrypoint.sh /docker-entrypoint.sh

ENTRYPOINT ["/docker-entrypoint.sh"]

STOPSIGNAL SIGTERM

ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["/usr/bin/rspamd", "-f", "-u", "_rspamd", "-g", "_rspamd"]

@@ -1,121 +1,5 @@
#!/bin/bash

until nc phpfpm 9001 -z; do
  echo "Waiting for PHP on port 9001..."
  sleep 3
done

until nc phpfpm 9002 -z; do
  echo "Waiting for PHP on port 9002..."
  sleep 3
done

mkdir -p /etc/rspamd/plugins.d \
  /etc/rspamd/custom

touch /etc/rspamd/rspamd.conf.local \
  /etc/rspamd/rspamd.conf.override

chmod 755 /var/lib/rspamd


[[ ! -f /etc/rspamd/override.d/worker-controller-password.inc ]] && echo '# Autogenerated by mailcow' > /etc/rspamd/override.d/worker-controller-password.inc

echo ${IPV4_NETWORK}.0/24 > /etc/rspamd/custom/mailcow_networks.map
echo ${IPV6_NETWORK} >> /etc/rspamd/custom/mailcow_networks.map

DOVECOT_V4=
DOVECOT_V6=
until [[ ! -z ${DOVECOT_V4} ]]; do
  DOVECOT_V4=$(dig a dovecot +short)
  DOVECOT_V6=$(dig aaaa dovecot +short)
  [[ ! -z ${DOVECOT_V4} ]] && break;
  echo "Waiting for Dovecot..."
  sleep 3
done
echo ${DOVECOT_V4}/32 > /etc/rspamd/custom/dovecot_trusted.map
if [[ ! -z ${DOVECOT_V6} ]]; then
  echo ${DOVECOT_V6}/128 >> /etc/rspamd/custom/dovecot_trusted.map
fi

RSPAMD_V4=
RSPAMD_V6=
until [[ ! -z ${RSPAMD_V4} ]]; do
  RSPAMD_V4=$(dig a rspamd +short)
  RSPAMD_V6=$(dig aaaa rspamd +short)
  [[ ! -z ${RSPAMD_V4} ]] && break;
  echo "Waiting for Rspamd..."
  sleep 3
done
echo ${RSPAMD_V4}/32 > /etc/rspamd/custom/rspamd_trusted.map
if [[ ! -z ${RSPAMD_V6} ]]; then
  echo ${RSPAMD_V6}/128 >> /etc/rspamd/custom/rspamd_trusted.map
fi

if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  cat <<EOF > /etc/rspamd/local.d/redis.conf
read_servers = "redis:6379";
write_servers = "${REDIS_SLAVEOF_IP}:${REDIS_SLAVEOF_PORT}";
timeout = 10;
EOF
  until [[ $(redis-cli -h redis-mailcow PING) == "PONG" ]]; do
    echo "Waiting for Redis @redis-mailcow..."
    sleep 2
  done
  until [[ $(redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} PING) == "PONG" ]]; do
    echo "Waiting for Redis @${REDIS_SLAVEOF_IP}..."
    sleep 2
  done
  redis-cli -h redis-mailcow SLAVEOF ${REDIS_SLAVEOF_IP} ${REDIS_SLAVEOF_PORT}
else
  cat <<EOF > /etc/rspamd/local.d/redis.conf
servers = "redis:6379";
timeout = 10;
EOF
  until [[ $(redis-cli -h redis-mailcow PING) == "PONG" ]]; do
    echo "Waiting for Redis slave..."
    sleep 2
  done
  redis-cli -h redis-mailcow SLAVEOF NO ONE
fi
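With REDIS_SLAVEOF_IP set, Rspamd reads from the local redis-mailcow instance and writes to the remote master, while the local instance is demoted to a replica via SLAVEOF. A quick way to confirm the demotion took effect (host name as used above):

redis-cli -h redis-mailcow INFO replication | grep -E 'role|master_host'
# Expected on a demoted instance: role:slave plus the master's address;
# after SLAVEOF NO ONE it reports role:master again.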

# Provide additional lua modules
ln -s /usr/lib/$(uname -m)-linux-gnu/liblua5.1-cjson.so.0.0.0 /usr/lib/rspamd/cjson.so

chown -R _rspamd:_rspamd /var/lib/rspamd \
  /etc/rspamd/local.d \
  /etc/rspamd/override.d \
  /etc/rspamd/rspamd.conf.local \
  /etc/rspamd/rspamd.conf.override \
  /etc/rspamd/plugins.d

# Fix missing default global maps, if any
# These exist in the mailcow UI and should not be removed
touch /etc/rspamd/custom/global_mime_from_blacklist.map \
  /etc/rspamd/custom/global_rcpt_blacklist.map \
  /etc/rspamd/custom/global_smtp_from_blacklist.map \
  /etc/rspamd/custom/global_mime_from_whitelist.map \
  /etc/rspamd/custom/global_rcpt_whitelist.map \
  /etc/rspamd/custom/global_smtp_from_whitelist.map \
  /etc/rspamd/custom/bad_languages.map \
  /etc/rspamd/custom/sa-rules \
  /etc/rspamd/custom/dovecot_trusted.map \
  /etc/rspamd/custom/rspamd_trusted.map \
  /etc/rspamd/custom/mailcow_networks.map \
  /etc/rspamd/custom/ip_wl.map \
  /etc/rspamd/custom/fishy_tlds.map \
  /etc/rspamd/custom/bad_words.map \
  /etc/rspamd/custom/bad_asn.map \
  /etc/rspamd/custom/bad_words_de.map \
  /etc/rspamd/custom/bulk_header.map \
  /etc/rspamd/custom/bad_header.map

# www-data (82) group needs to write to these files
chown _rspamd:_rspamd /etc/rspamd/custom/
chmod 0755 /etc/rspamd/custom/.
chown -R 82:82 /etc/rspamd/custom/*
chmod 644 -R /etc/rspamd/custom/*

# Run hooks
for file in /hooks/*; do
  if [ -x "${file}" ]; then
@@ -124,4 +8,13 @@ for file in /hooks/*; do
  fi
done

python3 -u /bootstrap/main.py
BOOTSTRAP_EXIT_CODE=$?

if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
  echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting Rspamd."
  exit $BOOTSTRAP_EXIT_CODE
fi

echo "Bootstrap succeeded. Starting Rspamd..."
exec "$@"

@@ -1,632 +0,0 @@
--[[
Copyright (c) 2016, Andrew Lewis <nerf@judo.za.org>
Copyright (c) 2016, Vsevolod Stakhov <vsevolod@highsecure.ru>

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
]]--

if confighelp then
  return
end

-- A plugin that pushes metadata (or whole messages) to external services

local redis_params
local lua_util = require "lua_util"
local rspamd_http = require "rspamd_http"
local rspamd_util = require "rspamd_util"
local rspamd_logger = require "rspamd_logger"
local ucl = require "ucl"
local E = {}
local N = 'metadata_exporter'

local settings = {
  pusher_enabled = {},
  pusher_format = {},
  pusher_select = {},
  mime_type = 'text/plain',
  defer = false,
  mail_from = '',
  mail_to = 'postmaster@localhost',
  helo = 'rspamd',
  email_template = [[From: "Rspamd" <$mail_from>
To: $mail_to
Subject: Spam alert
Date: $date
MIME-Version: 1.0
Message-ID: <$our_message_id>
Content-type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit

Authenticated username: $user
IP: $ip
Queue ID: $qid
SMTP FROM: $from
SMTP RCPT: $rcpt
MIME From: $header_from
MIME To: $header_to
MIME Date: $header_date
Subject: $header_subject
Message-ID: $message_id
Action: $action
Score: $score
Symbols: $symbols]],
}

local function get_general_metadata(task, flatten, no_content)
  local r = {}
  local ip = task:get_from_ip()
  if ip and ip:is_valid() then
    r.ip = tostring(ip)
  else
    r.ip = 'unknown'
  end
  r.user = task:get_user() or 'unknown'
  r.qid = task:get_queue_id() or 'unknown'
  r.subject = task:get_subject() or 'unknown'
  r.action = task:get_metric_action('default')

  local s = task:get_metric_score('default')[1]
  r.score = flatten and string.format('%.2f', s) or s

  local fuzzy = task:get_mempool():get_variable("fuzzy_hashes", "fstrings")
  if fuzzy and #fuzzy > 0 then
    local fz = {}
    for _,h in ipairs(fuzzy) do
      table.insert(fz, h)
    end
    if not flatten then
      r.fuzzy = fz
    else
      r.fuzzy = table.concat(fz, ', ')
    end
  else
    r.fuzzy = 'unknown'
  end

  local rcpt = task:get_recipients('smtp')
  if rcpt then
    local l = {}
    for _, a in ipairs(rcpt) do
      table.insert(l, a['addr'])
    end
    if not flatten then
      r.rcpt = l
    else
      r.rcpt = table.concat(l, ', ')
    end
  else
    r.rcpt = 'unknown'
  end
  local from = task:get_from('smtp')
  if ((from or E)[1] or E).addr then
    r.from = from[1].addr
  else
    r.from = 'unknown'
  end
  local syminf = task:get_symbols_all()
  if flatten then
    local l = {}
    for _, sym in ipairs(syminf) do
      local txt
      if sym.options then
        local topt = table.concat(sym.options, ', ')
        txt = sym.name .. '(' .. string.format('%.2f', sym.score) .. ')' .. ' [' .. topt .. ']'
      else
        txt = sym.name .. '(' .. string.format('%.2f', sym.score) .. ')'
      end
      table.insert(l, txt)
    end
    r.symbols = table.concat(l, '\n\t')
  else
    r.symbols = syminf
  end
  local function process_header(name)
    local hdr = task:get_header_full(name)
    if hdr then
      local l = {}
      for _, h in ipairs(hdr) do
        table.insert(l, h.decoded)
      end
      if not flatten then
        return l
      else
        return table.concat(l, '\n')
      end
    else
      return 'unknown'
    end
  end
  if not no_content then
    r.header_from = process_header('from')
    r.header_to = process_header('to')
    r.header_subject = process_header('subject')
    r.header_date = process_header('date')
    r.message_id = task:get_message_id()
  end
  return r
end

local formatters = {
  default = function(task)
    return task:get_content(), {}
  end,
  email_alert = function(task, rule, extra)
    local meta = get_general_metadata(task, true)
    local display_emails = {}
    local mail_targets = {}
    meta.mail_from = rule.mail_from or settings.mail_from
    local mail_rcpt = rule.mail_to or settings.mail_to
    if type(mail_rcpt) ~= 'table' then
      table.insert(display_emails, string.format('<%s>', mail_rcpt))
      table.insert(mail_targets, mail_rcpt)
    else
      for _, e in ipairs(mail_rcpt) do
        table.insert(display_emails, string.format('<%s>', e))
        table.insert(mail_targets, e) -- insert each address, not the whole mail_rcpt table
      end
    end
    if rule.email_alert_sender then
      local x = task:get_from('smtp')
      if x and string.len(x[1].addr) > 0 then
        table.insert(mail_targets, x)
        table.insert(display_emails, string.format('<%s>', x[1].addr))
      end
    end
    if rule.email_alert_user then
      local x = task:get_user()
      if x then
        table.insert(mail_targets, x)
        table.insert(display_emails, string.format('<%s>', x))
      end
    end
    if rule.email_alert_recipients then
      local x = task:get_recipients('smtp')
      if x then
        for _, e in ipairs(x) do
          if string.len(e.addr) > 0 then
            table.insert(mail_targets, e.addr)
            table.insert(display_emails, string.format('<%s>', e.addr))
          end
        end
      end
    end
    meta.mail_to = table.concat(display_emails, ', ')
    meta.our_message_id = rspamd_util.random_hex(12) .. '@rspamd'
    meta.date = rspamd_util.time_to_string(rspamd_util.get_time())
    return lua_util.template(rule.email_template or settings.email_template, meta), { mail_targets = mail_targets }
  end,
  json = function(task)
    return ucl.to_format(get_general_metadata(task), 'json-compact')
  end
}

local function is_spam(action)
  return (action == 'reject' or action == 'add header' or action == 'rewrite subject')
end

local selectors = {
  default = function(task)
    return true
  end,
  is_spam = function(task)
    local action = task:get_metric_action('default')
    return is_spam(action)
  end,
  is_spam_authed = function(task)
    if not task:get_user() then
      return false
    end
    local action = task:get_metric_action('default')
    return is_spam(action)
  end,
  is_reject = function(task)
    local action = task:get_metric_action('default')
    return (action == 'reject')
  end,
  is_reject_authed = function(task)
    if not task:get_user() then
      return false
    end
    local action = task:get_metric_action('default')
    return (action == 'reject')
  end,
}

local function maybe_defer(task, rule)
  if rule.defer then
    rspamd_logger.warnx(task, 'deferring message')
    task:set_pre_result('soft reject', 'deferred', N)
  end
end

local pushers = {
  redis_pubsub = function(task, formatted, rule)
    local _,ret,upstream
    local function redis_pub_cb(err)
      if err then
        rspamd_logger.errx(task, 'got error %s when publishing on server %s',
          err, upstream:get_addr())
        return maybe_defer(task, rule)
      end
      return true
    end
    ret,_,upstream = rspamd_redis_make_request(task,
      redis_params, -- connect params
      nil, -- hash key
      true, -- is write
      redis_pub_cb, -- callback
      'PUBLISH', -- command
      {rule.channel, formatted} -- arguments
    )
    if not ret then
      rspamd_logger.errx(task, 'error connecting to redis')
      maybe_defer(task, rule)
    end
  end,
  http = function(task, formatted, rule)
    local function http_callback(err, code)
      if err then
        rspamd_logger.errx(task, 'got error %s in http callback', err)
        return maybe_defer(task, rule)
      end
      if code ~= 200 then
        rspamd_logger.errx(task, 'got unexpected http status: %s', code)
        return maybe_defer(task, rule)
      end
      return true
    end
    local hdrs = {}
    if rule.meta_headers then
      local gm = get_general_metadata(task, false, true)
      local pfx = rule.meta_header_prefix or 'X-Rspamd-'
      for k, v in pairs(gm) do
        if type(v) == 'table' then
          hdrs[pfx .. k] = ucl.to_format(v, 'json-compact')
        else
          hdrs[pfx .. k] = v
        end
      end
    end
    rspamd_http.request({
      task = task,
      url = rule.url,
      body = formatted,
      callback = http_callback,
      mime_type = rule.mime_type or settings.mime_type,
      headers = hdrs,
    })
  end,
  send_mail = function(task, formatted, rule, extra)
    local lua_smtp = require "lua_smtp"
    local function sendmail_cb(ret, err)
      if not ret then
        rspamd_logger.errx(task, 'SMTP export error: %s', err)
        maybe_defer(task, rule)
      end
    end

    lua_smtp.sendmail({
      task = task,
      host = rule.smtp,
      port = rule.smtp_port or settings.smtp_port or 25,
      from = rule.mail_from or settings.mail_from,
      recipients = extra.mail_targets or rule.mail_to or settings.mail_to,
      helo = rule.helo or settings.helo,
      timeout = rule.timeout or settings.timeout,
    }, formatted, sendmail_cb)
  end,
}

local opts = rspamd_config:get_all_opt(N)
if not opts then return end
local process_settings = {
  select = function(val)
    selectors.custom = assert(load(val))()
  end,
  format = function(val)
    formatters.custom = assert(load(val))()
  end,
  push = function(val)
    pushers.custom = assert(load(val))()
  end,
  custom_push = function(val)
    if type(val) == 'table' then
      for k, v in pairs(val) do
        pushers[k] = assert(load(v))()
      end
    end
  end,
  custom_select = function(val)
    if type(val) == 'table' then
      for k, v in pairs(val) do
        selectors[k] = assert(load(v))()
      end
    end
  end,
  custom_format = function(val)
    if type(val) == 'table' then
      for k, v in pairs(val) do
        formatters[k] = assert(load(v))()
      end
    end
  end,
  pusher_enabled = function(val)
    if type(val) == 'string' then
      if pushers[val] then
        settings.pusher_enabled[val] = true
      else
        rspamd_logger.errx(rspamd_config, 'Pusher type: %s is invalid', val)
      end
    elseif type(val) == 'table' then
      for _, v in ipairs(val) do
        if pushers[v] then
          settings.pusher_enabled[v] = true
        else
          rspamd_logger.errx(rspamd_config, 'Pusher type: %s is invalid', val)
        end
      end
    end
  end,
}
for k, v in pairs(opts) do
  local f = process_settings[k]
  if f then
    f(opts[k])
  else
    settings[k] = v
  end
end
if type(settings.rules) ~= 'table' then
  -- Legacy config
  settings.rules = {}
  if not next(settings.pusher_enabled) then
    if pushers.custom then
      rspamd_logger.infox(rspamd_config, 'Custom pusher implicitly enabled')
      settings.pusher_enabled.custom = true
    else
      -- Check legacy options
      if settings.url then
        rspamd_logger.warnx(rspamd_config, 'HTTP pusher implicitly enabled')
        settings.pusher_enabled.http = true
      end
      if settings.channel then
        rspamd_logger.warnx(rspamd_config, 'Redis Pubsub pusher implicitly enabled')
        settings.pusher_enabled.redis_pubsub = true
      end
      if settings.smtp and settings.mail_to then
        rspamd_logger.warnx(rspamd_config, 'SMTP pusher implicitly enabled')
        settings.pusher_enabled.send_mail = true
      end
    end
  end
  if not next(settings.pusher_enabled) then
    rspamd_logger.errx(rspamd_config, 'No push backend enabled')
    return
  end
  if settings.formatter then
    settings.format = formatters[settings.formatter]
    if not settings.format then
      rspamd_logger.errx(rspamd_config, 'No such formatter: %s', settings.formatter)
      return
    end
  end
  if settings.selector then
    settings.select = selectors[settings.selector]
    if not settings.select then
      rspamd_logger.errx(rspamd_config, 'No such selector: %s', settings.selector)
      return
    end
  end
  for k in pairs(settings.pusher_enabled) do
    local formatter = settings.pusher_format[k]
    local selector = settings.pusher_select[k]
    if not formatter then
      settings.pusher_format[k] = settings.formatter or 'default'
      rspamd_logger.infox(rspamd_config, 'Using default formatter for %s pusher', k)
    else
      if not formatters[formatter] then
        rspamd_logger.errx(rspamd_config, 'No such formatter: %s - disabling %s', formatter, k)
        settings.pusher_enabled[k] = nil -- was settings.pusher_enabled.k, which indexes the literal key 'k'
      end
    end
    if not selector then
      settings.pusher_select[k] = settings.selector or 'default'
      rspamd_logger.infox(rspamd_config, 'Using default selector for %s pusher', k)
    else
      if not selectors[selector] then
        rspamd_logger.errx(rspamd_config, 'No such selector: %s - disabling %s', selector, k)
        settings.pusher_enabled[k] = nil -- same fix as above
      end
    end
  end
  if settings.pusher_enabled.redis_pubsub then
    redis_params = rspamd_parse_redis_server(N)
    if not redis_params then
      rspamd_logger.errx(rspamd_config, 'No redis servers are specified')
      settings.pusher_enabled.redis_pubsub = nil
    else
      local r = {}
      r.backend = 'redis_pubsub'
      r.channel = settings.channel
      r.defer = settings.defer
      r.selector = settings.pusher_select.redis_pubsub
      r.formatter = settings.pusher_format.redis_pubsub
      settings.rules[r.backend:upper()] = r
    end
  end
  if settings.pusher_enabled.http then
    if not settings.url then
      rspamd_logger.errx(rspamd_config, 'No URL is specified')
      settings.pusher_enabled.http = nil
    else
      local r = {}
      r.backend = 'http'
      r.url = settings.url
      r.mime_type = settings.mime_type
      r.defer = settings.defer
      r.selector = settings.pusher_select.http
      r.formatter = settings.pusher_format.http
      settings.rules[r.backend:upper()] = r
    end
  end
  if settings.pusher_enabled.send_mail then
    if not (settings.mail_to and settings.smtp) then
      rspamd_logger.errx(rspamd_config, 'No mail_to and/or smtp setting is specified')
      settings.pusher_enabled.send_mail = nil
    else
      local r = {}
      r.backend = 'send_mail'
      r.mail_to = settings.mail_to
      r.mail_from = settings.mail_from
      r.helo = settings.helo -- was settings.hello, which is never defined
      r.smtp = settings.smtp
      r.smtp_port = settings.smtp_port
      r.email_template = settings.email_template
      r.defer = settings.defer
      r.selector = settings.pusher_select.send_mail
      r.formatter = settings.pusher_format.send_mail
      settings.rules[r.backend:upper()] = r
    end
  end
  if not next(settings.pusher_enabled) then
    rspamd_logger.errx(rspamd_config, 'No push backend enabled')
    return
  end
elseif not next(settings.rules) then
  lua_util.debugm(N, rspamd_config, 'No rules enabled')
  return
end
if not settings.rules or not next(settings.rules) then
  rspamd_logger.errx(rspamd_config, 'No rules enabled')
  return
end
local backend_required_elements = {
  http = {
    'url',
  },
  smtp = {
    'mail_to',
    'smtp',
  },
  redis_pubsub = {
    'channel',
  },
}
local check_element = {
  selector = function(k, v)
    if not selectors[v] then
      rspamd_logger.errx(rspamd_config, 'Rule %s has invalid selector %s', k, v)
      return false
    else
      return true
    end
  end,
  formatter = function(k, v)
    if not formatters[v] then
      rspamd_logger.errx(rspamd_config, 'Rule %s has invalid formatter %s', k, v)
      return false
    else
      return true
    end
  end,
}
local backend_check = {
  default = function(k, rule)
    local reqset = backend_required_elements[rule.backend]
    if reqset then
      for _, e in ipairs(reqset) do
        if not rule[e] then
          rspamd_logger.errx(rspamd_config, 'Rule %s misses required setting %s', k, e)
          settings.rules[k] = nil
        end
      end
    end
    for sett, v in pairs(rule) do
      local f = check_element[sett]
      if f then
        if not f(sett, v) then
          settings.rules[k] = nil
        end
      end
    end
  end,
}
backend_check.redis_pubsub = function(k, rule)
  if not redis_params then
    redis_params = rspamd_parse_redis_server(N)
  end
  if not redis_params then
    rspamd_logger.errx(rspamd_config, 'No redis servers are specified')
    settings.rules[k] = nil
  else
    backend_check.default(k, rule)
  end
end
setmetatable(backend_check, {
  __index = function()
    return backend_check.default
  end,
})
for k, v in pairs(settings.rules) do
  if type(v) == 'table' then
    local backend = v.backend
    if not backend then
      rspamd_logger.errx(rspamd_config, 'Rule %s has no backend', k)
      settings.rules[k] = nil
    elseif not pushers[backend] then
      rspamd_logger.errx(rspamd_config, 'Rule %s has invalid backend %s', k, backend)
      settings.rules[k] = nil
    else
      local f = backend_check[backend]
      f(k, v)
    end
  else
    rspamd_logger.errx(rspamd_config, 'Rule %s has bad type: %s', k, type(v))
    settings.rules[k] = nil
  end
end

local function gen_exporter(rule)
  return function (task)
    if task:has_flag('skip') then return end
    local selector = rule.selector or 'default'
    local selected = selectors[selector](task)
    if selected then
      lua_util.debugm(N, task, 'Message selected for processing')
      local formatter = rule.formatter or 'default'
      local formatted, extra = formatters[formatter](task, rule)
      if formatted then
        pushers[rule.backend](task, formatted, rule, extra)
      else
        lua_util.debugm(N, task, 'Formatter [%s] returned non-truthy value [%s]', formatter, formatted)
      end
    else
      lua_util.debugm(N, task, 'Selector [%s] returned non-truthy value [%s]', selector, selected)
    end
  end
end

if not next(settings.rules) then
  rspamd_logger.errx(rspamd_config, 'No rules enabled')
  lua_util.disable_module(N, "config")
end
for k, r in pairs(settings.rules) do
  rspamd_config:register_symbol({
    name = 'EXPORT_METADATA_' .. k,
    type = 'idempotent',
    callback = gen_exporter(r),
    priority = 10,
    flags = 'empty,explicit_disable,ignore_passthrough',
  })
end
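For reference, the plugin above is driven by a rules table: every rule names a backend (one of the pushers), optionally a selector and a formatter, plus the backend's required elements (url for http, channel for redis_pubsub, mail_to/smtp for smtp). A hypothetical override showing the shape such a rule takes (file path and values are illustrative, not mailcow defaults):

cat <<'EOF' > /etc/rspamd/local.d/metadata_exporter.conf
rules {
  EXPORT_HTTP {
    backend = "http";                # must match a defined pusher
    url = "http://127.0.0.1:8080/";  # required element for the http backend
    selector = "is_spam";            # push only messages classified as spam
    formatter = "json";              # general metadata as compact JSON
  }
}
EOF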
@@ -1,11 +1,13 @@
FROM debian:bullseye-slim
LABEL maintainer "The Infrastructure Company GmbH GmbH <info@servercow.de>"
FROM debian:bookworm-slim

LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"

ARG DEBIAN_FRONTEND=noninteractive
ARG SOGO_DEBIAN_REPOSITORY=http://www.axis.cz/linux/debian
ARG DEBIAN_VERSION=bookworm
ARG SOGO_DEBIAN_REPOSITORY=https://packagingv2.sogo.nu/sogo-nightly-debian/
# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=^(?<version>.*)$
ARG GOSU_VERSION=1.17
ENV LC_ALL C
ENV LC_ALL=C

# Prerequisites
RUN echo "Building from repository $SOGO_DEBIAN_REPOSITORY" \
@@ -21,36 +23,41 @@ RUN echo "Building from repository $SOGO_DEBIAN_REPOSITORY" \
  syslog-ng-core \
  syslog-ng-mod-redis \
  dirmngr \
  netcat \
  netcat-traditional \
  psmisc \
  wget \
  patch \
  python3 python3-pip \
  && dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')" \
  && wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$dpkgArch" \
  && chmod +x /usr/local/bin/gosu \
  && gosu nobody true \
  && mkdir /usr/share/doc/sogo \
  && touch /usr/share/doc/sogo/empty.sh \
  && apt-key adv --keyserver keys.openpgp.org --recv-key 74FFC6D72B925A34B5D356BDF8A27B36A6E2EAE9 \
  && echo "deb [trusted=yes] ${SOGO_DEBIAN_REPOSITORY} bullseye sogo-v5" > /etc/apt/sources.list.d/sogo.list \
  && wget -O- https://keys.openpgp.org/vks/v1/by-fingerprint/74FFC6D72B925A34B5D356BDF8A27B36A6E2EAE9 | gpg --dearmor | apt-key add - \
  && echo "deb [trusted=yes] ${SOGO_DEBIAN_REPOSITORY} ${DEBIAN_VERSION} main" > /etc/apt/sources.list.d/sogo.list \
  && apt-get update && apt-get install -y --no-install-recommends \
  sogo \
  sogo-activesync \
  && apt-get autoclean \
  && rm -rf /var/lib/apt/lists/* /etc/apt/sources.list.d/sogo.list \
  && rm -rf /var/lib/apt/lists/* \
  && touch /etc/default/locale

COPY ./bootstrap-sogo.sh /bootstrap-sogo.sh
COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
COPY supervisord.conf /etc/supervisor/supervisord.conf
COPY acl.diff /acl.diff
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
COPY docker-entrypoint.sh /
RUN pip install --break-system-packages \
  mysql-connector-python \
  jinja2 \
  redis \
  dnspython \
  psutil

RUN chmod +x /bootstrap-sogo.sh \
  /usr/local/sbin/stop-supervisor.sh

ENTRYPOINT ["/docker-entrypoint.sh"]
COPY data/Dockerfiles/bootstrap /bootstrap
COPY data/Dockerfiles/sogo/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
COPY data/Dockerfiles/sogo/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
COPY data/Dockerfiles/sogo/supervisord.conf /etc/supervisor/supervisord.conf
COPY data/Dockerfiles/sogo/stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
COPY data/Dockerfiles/sogo/docker-entrypoint.sh /

CMD exec /usr/bin/supervisord -c /etc/supervisor/supervisord.conf
RUN chmod +x /usr/local/sbin/stop-supervisor.sh

CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]

@@ -1,11 +0,0 @@
--- /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox 2018-08-17 18:29:57.987504204 +0200
+++ /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox 2018-08-17 18:29:35.918291298 +0200
@@ -46,7 +46,7 @@
          </md-item-template>
        </md-autocomplete>
      </div>
-     <md-card ng-repeat="user in acl.users | orderBy:['userClass', 'cn']"
+     <md-card ng-repeat="user in acl.users | filter:{ userClass: 'normal' } | orderBy:['cn']"
        class="sg-collapsed"
        ng-class="{ 'sg-expanded': user.uid == acl.selectedUid }">
        <a class="md-flex md-button" ng-click="acl.selectUser(user, $event)">
@@ -1,251 +0,0 @@
#!/bin/bash

# Wait for MySQL to warm up
while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
  echo "Waiting for database to come up..."
  sleep 2
done

# Wait until the port becomes free, sending SIGTERM to sogod meanwhile
until ! nc -z sogo-mailcow 20000;
do
  killall -TERM sogod
  sleep 3
done

# Wait for updated schema
DBV_NOW=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'db_schema';" -BN)
DBV_NEW=$(grep -oE '\$db_version = .*;' init_db.inc.php | sed 's/$db_version = //g;s/;//g' | cut -d \" -f2)
while [[ "${DBV_NOW}" != "${DBV_NEW}" ]]; do
  echo "Waiting for schema update..."
  DBV_NOW=$(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'db_schema';" -BN)
  DBV_NEW=$(grep -oE '\$db_version = .*;' init_db.inc.php | sed 's/$db_version = //g;s/;//g' | cut -d \" -f2)
  sleep 5
done
echo "DB schema is ${DBV_NOW}"

# Recreate view
if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  echo "We are master, preparing sogo_view..."
  mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "DROP VIEW IF EXISTS sogo_view"
  while [[ ${VIEW_OK} != 'OK' ]]; do
    mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
CREATE VIEW sogo_view (c_uid, domain, c_name, c_password, c_cn, mail, aliases, ad_aliases, ext_acl, kind, multiple_bookings) AS
SELECT
  mailbox.username,
  mailbox.domain,
  mailbox.username,
  IF(JSON_UNQUOTE(JSON_VALUE(attributes, '$.force_pw_update')) = '0', IF(JSON_UNQUOTE(JSON_VALUE(attributes, '$.sogo_access')) = 1, password, '{SSHA256}A123A123A321A321A321B321B321B123B123B321B432F123E321123123321321'), '{SSHA256}A123A123A321A321A321B321B321B123B123B321B432F123E321123123321321'),
  mailbox.name,
  mailbox.username,
  IFNULL(GROUP_CONCAT(ga.aliases ORDER BY ga.aliases SEPARATOR ' '), ''),
  IFNULL(gda.ad_alias, ''),
  IFNULL(external_acl.send_as_acl, ''),
  mailbox.kind,
  mailbox.multiple_bookings
FROM
  mailbox
  LEFT OUTER JOIN
    grouped_mail_aliases ga
      ON ga.username REGEXP CONCAT('(^|,)', mailbox.username, '($|,)')
  LEFT OUTER JOIN
    grouped_domain_alias_address gda
      ON gda.username = mailbox.username
  LEFT OUTER JOIN
    grouped_sender_acl_external external_acl
      ON external_acl.username = mailbox.username
WHERE
  mailbox.active = '1'
GROUP BY
  mailbox.username;
EOF
    if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'sogo_view'") ]]; then
      VIEW_OK=OK
    else
      echo "Will retry to set up the SOGo view in 3s..."
      sleep 3
    fi
  done
else
  while [[ ${VIEW_OK} != 'OK' ]]; do
    if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'sogo_view'") ]]; then
      VIEW_OK=OK
    else
      echo "Waiting for SOGo view to be created by master..."
      sleep 3
    fi
  done
fi

# Wait for static view table if missing after an update and refresh its content
if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  echo "We are master, preparing _sogo_static_view..."
  while [[ ${STATIC_VIEW_OK} != 'OK' ]]; do
    if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = '_sogo_static_view'") ]]; then
      STATIC_VIEW_OK=OK
      echo "Updating _sogo_static_view content..."
      # If changed, also update init_db.inc.php
      mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "REPLACE INTO _sogo_static_view (c_uid, domain, c_name, c_password, c_cn, mail, aliases, ad_aliases, ext_acl, kind, multiple_bookings) SELECT c_uid, domain, c_name, c_password, c_cn, mail, aliases, ad_aliases, ext_acl, kind, multiple_bookings from sogo_view;"
      mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "DELETE FROM _sogo_static_view WHERE c_uid NOT IN (SELECT username FROM mailbox WHERE active = '1')"
    else
      echo "Waiting for database initialization..."
      sleep 3
    fi
  done
else
  while [[ ${STATIC_VIEW_OK} != 'OK' ]]; do
    if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = '_sogo_static_view'") ]]; then
      STATIC_VIEW_OK=OK
    else
      echo "Waiting for database initialization by master..."
      sleep 3
    fi
  done
fi


# Recreate password update trigger
if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  echo "We are master, preparing update trigger..."
  mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "DROP TRIGGER IF EXISTS sogo_update_password"
  while [[ ${TRIGGER_OK} != 'OK' ]]; do
    mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
DELIMITER -
CREATE TRIGGER sogo_update_password AFTER UPDATE ON _sogo_static_view
  FOR EACH ROW
BEGIN
  UPDATE mailbox SET password = NEW.c_password WHERE NEW.c_uid = username;
END;
-
DELIMITER ;
EOF
    if [[ ! -z $(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TRIGGERS WHERE TRIGGER_NAME = 'sogo_update_password'") ]]; then
      TRIGGER_OK=OK
    else
      echo "Will retry to set up the SOGo password update trigger in 3s"
      sleep 3
    fi
  done
fi
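The trigger mirrors password changes made through SOGo back into the mailbox table: any UPDATE on _sogo_static_view copies c_password into mailbox.password for the matching username. A hedged way to observe it on a throwaway account (destructive; user@example.org and the hash are placeholders, run inside the sogo container):

mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} \
  -e "UPDATE _sogo_static_view SET c_password='{SSHA256}test' WHERE c_uid='user@example.org';"
mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} \
  -e "SELECT password FROM mailbox WHERE username='user@example.org';"
# Both queries should now return the same value, showing that the trigger fired.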

# cat /dev/urandom seems to hang here occasionally and is not recommended anyway, better use openssl
RAND_PASS=$(openssl rand -base64 16 | tr -dc _A-Z-a-z-0-9)

# Generate plist header with timezone data
mkdir -p /var/lib/sogo/GNUstep/Defaults/
cat <<EOF > /var/lib/sogo/GNUstep/Defaults/sogod.plist
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//GNUstep//DTD plist 0.9//EN" "http://www.gnustep.org/plist-0_9.xml">
<plist version="0.9">
<dict>
    <key>OCSAclURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_acl</string>
    <key>SOGoIMAPServer</key>
    <string>imap://${IPV4_NETWORK}.250:143/?TLS=YES&amp;tlsVerifyMode=none</string>
    <key>SOGoSieveServer</key>
    <string>sieve://${IPV4_NETWORK}.250:4190/?TLS=YES&amp;tlsVerifyMode=none</string>
    <key>SOGoSMTPServer</key>
    <string>smtp://${IPV4_NETWORK}.253:588/?TLS=YES&amp;tlsVerifyMode=none</string>
    <key>SOGoTrustProxyAuthentication</key>
    <string>YES</string>
    <key>SOGoEncryptionKey</key>
    <string>${RAND_PASS}</string>
    <key>OCSCacheFolderURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_cache_folder</string>
    <key>OCSEMailAlarmsFolderURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_alarms_folder</string>
    <key>OCSFolderInfoURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_folder_info</string>
    <key>OCSSessionsFolderURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_sessions_folder</string>
    <key>OCSStoreURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_store</string>
    <key>SOGoProfileURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_user_profile</string>
    <key>SOGoTimeZone</key>
    <string>${TZ}</string>
    <key>domains</key>
    <dict>
EOF

# Generate multi-domain setup
while read -r line gal
do
  echo "        <key>${line}</key>
        <dict>
            <key>SOGoMailDomain</key>
            <string>${line}</string>
            <key>SOGoUserSources</key>
            <array>
                <dict>
                    <key>MailFieldNames</key>
                    <array>
                        <string>aliases</string>
                        <string>ad_aliases</string>
                        <string>ext_acl</string>
                    </array>
                    <key>KindFieldName</key>
                    <string>kind</string>
                    <key>DomainFieldName</key>
                    <string>domain</string>
                    <key>MultipleBookingsFieldName</key>
                    <string>multiple_bookings</string>
                    <key>listRequiresDot</key>
                    <string>NO</string>
                    <key>canAuthenticate</key>
                    <string>YES</string>
                    <key>displayName</key>
                    <string>GAL ${line}</string>
                    <key>id</key>
                    <string>${line}</string>
                    <key>isAddressBook</key>
                    <string>${gal}</string>
                    <key>type</key>
                    <string>sql</string>
                    <key>userPasswordAlgorithm</key>
                    <string>${MAILCOW_PASS_SCHEME}</string>
                    <key>prependPasswordScheme</key>
                    <string>YES</string>
                    <key>viewURL</key>
                    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/_sogo_static_view</string>
                </dict>" >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
  # Generate alternative LDAP authentication dict, when SQL authentication fails
  # This will nevertheless read attributes from LDAP
  line=${line} envsubst < /etc/sogo/plist_ldap >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
  echo "            </array>
        </dict>" >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
done < <(mysql --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain, CASE gal WHEN '1' THEN 'YES' ELSE 'NO' END AS gal FROM domain;" -B -N)

# Generate footer
echo '    </dict>
</dict>
</plist>' >> /var/lib/sogo/GNUstep/Defaults/sogod.plist

# Fix permissions
chown sogo:sogo -R /var/lib/sogo/
chmod 600 /var/lib/sogo/GNUstep/Defaults/sogod.plist

# Patch ACLs
#if [[ ${ACL_ANYONE} == 'allow' ]]; then
#  # enable any or authenticated targets for ACL
#  if patch -R -sfN --dry-run /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff > /dev/null; then
#    patch -R /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff;
#  fi
#else
#  # disable any or authenticated targets for ACL
#  if patch -sfN --dry-run /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff > /dev/null; then
#    patch /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff;
#  fi
#fi

# Copy logo, if any
[[ -f /etc/sogo/sogo-full.svg ]] && cp /etc/sogo/sogo-full.svg /usr/lib/GNUstep/SOGo/WebServerResources/img/sogo-full.svg

# Rsync web content
echo "Syncing web content with named volume"
rsync -a /usr/lib/GNUstep/SOGo/. /sogo_web/

# Chown backup path
chown -R sogo:sogo /sogo_backup

exec gosu sogo /usr/sbin/sogod
@@ -1,15 +1,5 @@
#!/bin/bash

if [[ "${SKIP_SOGO}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  echo "SKIP_SOGO=y, skipping SOGo..."
  sleep 365d
  exit 0
fi

if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
fi

# Run hooks
for file in /hooks/*; do
  if [ -x "${file}" ]; then
@@ -18,4 +8,13 @@ for file in /hooks/*; do
  fi
done

exec "$@"
python3 -u /bootstrap/main.py
BOOTSTRAP_EXIT_CODE=$?

if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
  echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting SOGo."
  exit $BOOTSTRAP_EXIT_CODE
fi

echo "Bootstrap succeeded. Starting SOGo..."
exec gosu sogo /usr/sbin/sogod

@@ -11,8 +11,8 @@ stderr_logfile_maxbytes=0
autostart=true
priority=1

[program:bootstrap-sogo]
command=/bootstrap-sogo.sh
[program:bootstrap]
command=/docker-entrypoint.sh
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
@@ -1,4 +1,4 @@
@version: 3.28
@version: 3.38
@include "scl.conf"
options {
chain_hostnames(off);
@@ -22,6 +22,7 @@ destination d_redis_ui_log {
host("`REDIS_SLAVEOF_IP`")
persist-name("redis1")
port(`REDIS_SLAVEOF_PORT`)
auth("`REDISPASS`")
command("LPUSH" "SOGO_LOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
);
};
@@ -30,6 +31,7 @@ destination d_redis_f2b_channel {
host("`REDIS_SLAVEOF_IP`")
persist-name("redis2")
port(`REDIS_SLAVEOF_PORT`)
auth("`REDISPASS`")
command("PUBLISH" "F2B_CHANNEL" "$(sanitize $MESSAGE)")
);
};
@@ -1,4 +1,4 @@
@version: 3.28
@version: 3.38
@include "scl.conf"
options {
chain_hostnames(off);
@@ -22,6 +22,7 @@ destination d_redis_ui_log {
host("redis-mailcow")
persist-name("redis1")
port(6379)
auth("`REDISPASS`")
command("LPUSH" "SOGO_LOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
);
};
@@ -30,6 +31,7 @@ destination d_redis_f2b_channel {
host("redis-mailcow")
persist-name("redis2")
port(6379)
auth("`REDISPASS`")
command("PUBLISH" "F2B_CHANNEL" "$(sanitize $MESSAGE)")
);
};
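Both syslog-ng configurations gain an auth() statement because the Redis instances now require a password (REDISPASS). The credential the destinations will use can be verified from a shell inside any container attached to the mailcow network, roughly:

  # Prints PONG when AUTH succeeds; an authentication error otherwise
  redis-cli -h redis-mailcow -p 6379 -a "${REDISPASS}" --no-auth-warning PING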
@@ -1,31 +0,0 @@
FROM solr:7.7-slim

USER root

# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=(?<version>.*)$
ARG GOSU_VERSION=1.17

COPY solr.sh /
COPY solr-config-7.7.0.xml /
COPY solr-schema-7.7.0.xml /

RUN dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')" \
&& wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$dpkgArch" \
&& chmod +x /usr/local/bin/gosu \
&& gosu nobody true \
&& apt-get update && apt-get install -y --no-install-recommends \
tzdata \
curl \
bash \
zip \
&& apt-get autoclean \
&& rm -rf /var/lib/apt/lists/* \
&& chmod +x /solr.sh \
&& sync \
&& bash /solr.sh --bootstrap

RUN zip -q -d /opt/solr/server/lib/ext/log4j-core-*.jar org/apache/logging/log4j/core/lookup/JndiLookup.class

RUN apt remove zip -y

CMD ["/solr.sh"]
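The removed Solr Dockerfile carried the Log4Shell mitigation: `zip -q -d` strips JndiLookup.class out of log4j-core, the published workaround for deployments that cannot upgrade log4j itself. Whether a given jar still ships the class can be checked with unzip alone (jar path as in the Dockerfile above):

  # grep exits 0 only if the vulnerable lookup class is still inside the jar
  if unzip -l /opt/solr/server/lib/ext/log4j-core-*.jar | grep -q 'JndiLookup.class'; then echo present; else echo removed; fi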
@@ -1,289 +0,0 @@
<?xml version="1.0" encoding="UTF-8" ?>

<!-- This is the default config with stuff non-essential to Dovecot removed. -->

<config>
<!-- Controls what version of Lucene various components of Solr
adhere to. Generally, you want to use the latest version to
get all bug fixes and improvements. It is highly recommended
that you fully re-index after changing this setting as it can
affect both how text is indexed and queried.
-->
<luceneMatchVersion>7.7.0</luceneMatchVersion>

<!-- A 'dir' option by itself adds any files found in the directory
to the classpath, this is useful for including all jars in a
directory.

When a 'regex' is specified in addition to a 'dir', only the
files in that directory which completely match the regex
(anchored on both ends) will be included.

If a 'dir' option (with or without a regex) is used and nothing
is found that matches, a warning will be logged.

The examples below can be used to load some solr-contribs along
with their external dependencies.
-->
<lib dir="${solr.install.dir:../../../..}/contrib/extraction/lib" regex=".*\.jar" />
<lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-cell-\d.*\.jar" />

<lib dir="${solr.install.dir:../../../..}/contrib/clustering/lib/" regex=".*\.jar" />
<lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-clustering-\d.*\.jar" />

<lib dir="${solr.install.dir:../../../..}/contrib/langid/lib/" regex=".*\.jar" />
<lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-langid-\d.*\.jar" />

<lib dir="${solr.install.dir:../../../..}/contrib/velocity/lib" regex=".*\.jar" />
<lib dir="${solr.install.dir:../../../..}/dist/" regex="solr-velocity-\d.*\.jar" />

<!-- Data Directory

Used to specify an alternate directory to hold all index data
other than the default ./data under the Solr home. If
replication is in use, this should match the replication
configuration.
-->
<dataDir>${solr.data.dir:}</dataDir>

<!-- The default high-performance update handler -->
<updateHandler class="solr.DirectUpdateHandler2">

<!-- Enables a transaction log, used for real-time get, durability, and
and solr cloud replica recovery. The log can grow as big as
uncommitted changes to the index, so use of a hard autoCommit
is recommended (see below).
"dir" - the target directory for transaction logs, defaults to the
solr data directory.
"numVersionBuckets" - sets the number of buckets used to keep
track of max version values when checking for re-ordered
updates; increase this value to reduce the cost of
synchronizing access to version buckets during high-volume
indexing, this requires 8 bytes (long) * numVersionBuckets
of heap space per Solr core.
-->
<updateLog>
<str name="dir">${solr.ulog.dir:}</str>
<int name="numVersionBuckets">${solr.ulog.numVersionBuckets:65536}</int>
</updateLog>

<!-- AutoCommit

Perform a hard commit automatically under certain conditions.
Instead of enabling autoCommit, consider using "commitWithin"
when adding documents.

http://wiki.apache.org/solr/UpdateXmlMessages

maxDocs - Maximum number of documents to add since the last
commit before automatically triggering a new commit.

maxTime - Maximum amount of time in ms that is allowed to pass
since a document was added before automatically
triggering a new commit.
openSearcher - if false, the commit causes recent index changes
to be flushed to stable storage, but does not cause a new
searcher to be opened to make those changes visible.

If the updateLog is enabled, then it's highly recommended to
have some sort of hard autoCommit to limit the log size.
-->
<autoCommit>
<maxTime>${solr.autoCommit.maxTime:15000}</maxTime>
<openSearcher>false</openSearcher>
</autoCommit>

<!-- softAutoCommit is like autoCommit except it causes a
'soft' commit which only ensures that changes are visible
but does not ensure that data is synced to disk. This is
faster and more near-realtime friendly than a hard commit.
-->
<autoSoftCommit>
<maxTime>${solr.autoSoftCommit.maxTime:-1}</maxTime>
</autoSoftCommit>

<!-- Update Related Event Listeners

Various IndexWriter related events can trigger Listeners to
take actions.

postCommit - fired after every commit or optimize command
postOptimize - fired after every optimize command
-->

</updateHandler>

<!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Query section - these settings control query time things like caches
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
<query>
<!-- Solr Internal Query Caches

There are two implementations of cache available for Solr,
LRUCache, based on a synchronized LinkedHashMap, and
FastLRUCache, based on a ConcurrentHashMap.

FastLRUCache has faster gets and slower puts in single
threaded operation and thus is generally faster than LRUCache
when the hit ratio of the cache is high (> 75%), and may be
faster under other scenarios on multi-cpu systems.
-->

<!-- Filter Cache

Cache used by SolrIndexSearcher for filters (DocSets),
unordered sets of *all* documents that match a query. When a
new searcher is opened, its caches may be prepopulated or
"autowarmed" using data from caches in the old searcher.
autowarmCount is the number of items to prepopulate. For
LRUCache, the autowarmed items will be the most recently
accessed items.

Parameters:
class - the SolrCache implementation LRUCache or
(LRUCache or FastLRUCache)
size - the maximum number of entries in the cache
initialSize - the initial capacity (number of entries) of
the cache. (see java.util.HashMap)
autowarmCount - the number of entries to prepopulate from
and old cache.
maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
to occupy. Note that when this option is specified, the size
and initialSize parameters are ignored.
-->
<filterCache class="solr.FastLRUCache"
size="512"
initialSize="512"
autowarmCount="0"/>

<!-- Query Result Cache

Caches results of searches - ordered lists of document ids
(DocList) based on a query, a sort, and the range of documents requested.
Additional supported parameter by LRUCache:
maxRamMB - the maximum amount of RAM (in MB) that this cache is allowed
to occupy
-->
<queryResultCache class="solr.LRUCache"
size="512"
initialSize="512"
autowarmCount="0"/>

<!-- Document Cache

Caches Lucene Document objects (the stored fields for each
document). Since Lucene internal document ids are transient,
this cache will not be autowarmed.
-->
<documentCache class="solr.LRUCache"
size="512"
initialSize="512"
autowarmCount="0"/>

<!-- custom cache currently used by block join -->
<cache name="perSegFilter"
class="solr.search.LRUCache"
size="10"
initialSize="0"
autowarmCount="10"
regenerator="solr.NoOpRegenerator" />

<!-- Lazy Field Loading

If true, stored fields that are not requested will be loaded
lazily. This can result in a significant speed improvement
if the usual case is to not load all stored fields,
especially if the skipped fields are large compressed text
fields.
-->
<enableLazyFieldLoading>true</enableLazyFieldLoading>

<!-- Result Window Size

An optimization for use with the queryResultCache. When a search
is requested, a superset of the requested number of document ids
are collected. For example, if a search for a particular query
requests matching documents 10 through 19, and queryWindowSize is 50,
then documents 0 through 49 will be collected and cached. Any further
requests in that range can be satisfied via the cache.
-->
<queryResultWindowSize>20</queryResultWindowSize>

<!-- Maximum number of documents to cache for any entry in the
queryResultCache.
-->
<queryResultMaxDocsCached>200</queryResultMaxDocsCached>

<!-- Use Cold Searcher

If a search request comes in and there is no current
registered searcher, then immediately register the still
warming searcher and use it. If "false" then all requests
will block until the first searcher is done warming.
-->
<useColdSearcher>false</useColdSearcher>

</query>


<!-- Request Dispatcher

This section contains instructions for how the SolrDispatchFilter
should behave when processing requests for this SolrCore.

-->
<requestDispatcher>
<httpCaching never304="true" />
</requestDispatcher>

<!-- Request Handlers

http://wiki.apache.org/solr/SolrRequestHandler

Incoming queries will be dispatched to a specific handler by name
based on the path specified in the request.

If a Request Handler is declared with startup="lazy", then it will
not be initialized until the first request that uses it.

-->
<!-- SearchHandler

http://wiki.apache.org/solr/SearchHandler

For processing Search Queries, the primary Request Handler
provided with Solr is "SearchHandler" It delegates to a sequent
of SearchComponents (see below) and supports distributed
queries across multiple shards
-->
<requestHandler name="/select" class="solr.SearchHandler">
<!-- default values for query parameters can be specified, these
will be overridden by parameters in the request
-->
<lst name="defaults">
<str name="echoParams">explicit</str>
<int name="rows">10</int>
</lst>
</requestHandler>

<initParams path="/update/**,/select">
<lst name="defaults">
<str name="df">_text_</str>
</lst>
</initParams>

<!-- Response Writers

http://wiki.apache.org/solr/QueryResponseWriter

Request responses will be written using the writer specified by
the 'wt' request parameter matching the name of a registered
writer.

The "default" writer is the default and will be used if 'wt' is
not specified in the request.
-->
<queryResponseWriter name="xml"
default="true"
class="solr.XMLResponseWriter" />
</config>
@@ -1,49 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>

<schema name="dovecot-fts" version="2.0">
<fieldType name="string" class="solr.StrField" omitNorms="true" sortMissingLast="true"/>
<fieldType name="long" class="solr.LongPointField" positionIncrementGap="0"/>
<fieldType name="boolean" class="solr.BoolField" sortMissingLast="true"/>

<fieldType name="text" class="solr.TextField" autoGeneratePhraseQueries="true" positionIncrementGap="100">
<analyzer type="index">
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.EdgeNGramFilterFactory" minGramSize="3" maxGramSize="20"/>
<filter class="solr.StopFilterFactory" words="stopwords.txt" ignoreCase="true"/>
<filter class="solr.WordDelimiterGraphFilterFactory" catenateNumbers="1" generateNumberParts="1" splitOnCaseChange="1" generateWordParts="1" splitOnNumerics="1" catenateAll="1" catenateWords="1"/>
<filter class="solr.FlattenGraphFilterFactory"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
<analyzer type="query">
<tokenizer class="solr.StandardTokenizerFactory"/>
<filter class="solr.SynonymGraphFilterFactory" expand="true" ignoreCase="true" synonyms="synonyms.txt"/>
<filter class="solr.FlattenGraphFilterFactory"/>
<filter class="solr.StopFilterFactory" words="stopwords.txt" ignoreCase="true"/>
<filter class="solr.WordDelimiterGraphFilterFactory" catenateNumbers="1" generateNumberParts="1" splitOnCaseChange="1" generateWordParts="1" splitOnNumerics="1" catenateAll="1" catenateWords="1"/>
<filter class="solr.LowerCaseFilterFactory"/>
<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt"/>
<filter class="solr.PorterStemFilterFactory"/>
</analyzer>
</fieldType>

<field name="id" type="string" indexed="true" required="true" stored="true"/>
<field name="uid" type="long" indexed="true" required="true" stored="true"/>
<field name="box" type="string" indexed="true" required="true" stored="true"/>
<field name="user" type="string" indexed="true" required="true" stored="true"/>

<field name="hdr" type="text" indexed="true" stored="false"/>
<field name="body" type="text" indexed="true" stored="false"/>

<field name="from" type="text" indexed="true" stored="false"/>
<field name="to" type="text" indexed="true" stored="false"/>
<field name="cc" type="text" indexed="true" stored="false"/>
<field name="bcc" type="text" indexed="true" stored="false"/>
<field name="subject" type="text" indexed="true" stored="false"/>

<!-- Used by Solr internally: -->
<field name="_version_" type="long" indexed="true" stored="true"/>

<uniqueKey>id</uniqueKey>
</schema>
@@ -1,61 +0,0 @@
#!/bin/bash

if [[ "${SKIP_SOLR}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
echo "SKIP_SOLR=y, skipping Solr..."
sleep 365d
exit 0
fi

MEM_TOTAL=$(awk '/MemTotal/ {print $2}' /proc/meminfo)

if [[ "${1}" != "--bootstrap" ]]; then
if [ ${MEM_TOTAL} -lt "2097152" ]; then
echo "System memory less than 2 GB, skipping Solr..."
sleep 365d
exit 0
fi
fi

set -e

# run the optional initdb
. /opt/docker-solr/scripts/run-initdb

# fixing volume permission
[[ -d /opt/solr/server/solr/dovecot-fts/data ]] && chown -R solr:solr /opt/solr/server/solr/dovecot-fts/data
if [[ "${1}" != "--bootstrap" ]]; then
sed -i '/SOLR_HEAP=/c\SOLR_HEAP="'${SOLR_HEAP:-1024}'m"' /opt/solr/bin/solr.in.sh
else
sed -i '/SOLR_HEAP=/c\SOLR_HEAP="256m"' /opt/solr/bin/solr.in.sh
fi

if [[ "${1}" == "--bootstrap" ]]; then
echo "Creating initial configuration"
echo "Modifying default config set"
cp /solr-config-7.7.0.xml /opt/solr/server/solr/configsets/_default/conf/solrconfig.xml
cp /solr-schema-7.7.0.xml /opt/solr/server/solr/configsets/_default/conf/schema.xml
rm /opt/solr/server/solr/configsets/_default/conf/managed-schema

echo "Starting local Solr instance to setup configuration"
gosu solr start-local-solr

echo "Creating core \"dovecot-fts\""
gosu solr /opt/solr/bin/solr create -c "dovecot-fts"

# See https://github.com/docker-solr/docker-solr/issues/27
echo "Checking core"
while ! wget -O - 'http://localhost:8983/solr/admin/cores?action=STATUS' | grep -q instanceDir; do
echo "Could not find any cores, waiting..."
sleep 3
done

echo "Created core \"dovecot-fts\""

echo "Stopping local Solr"
gosu solr stop-local-solr

exit 0
fi

exec gosu solr solr-foreground
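The 2097152 threshold in the removed solr.sh is KiB: /proc/meminfo reports MemTotal in KiB, so the gate is exactly 2 GiB. The same arithmetic, self-contained:

  mem_kib=$(awk '/MemTotal/ {print $2}' /proc/meminfo)
  # 2 GiB = 2 * 1024 * 1024 KiB = 2097152 KiB
  if [ "${mem_kib}" -lt 2097152 ]; then
    echo "less than 2 GiB of RAM"
  fi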
@@ -1,30 +1,36 @@
FROM alpine:3.18
FROM alpine:3.21

LABEL maintainer "The Infrastructure Company GmbH GmbH <info@servercow.de>"
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"

RUN apk add --update --no-cache \
curl \
bind-tools \
netcat-openbsd \
coreutils \
unbound \
bash \
openssl \
drill \
tzdata \
syslog-ng \
supervisor \
&& curl -o /etc/unbound/root.hints https://www.internic.net/domain/named.cache \
&& chown root:unbound /etc/unbound \
&& adduser unbound tty \
&& adduser unbound tty \
&& chmod 775 /etc/unbound

EXPOSE 53/udp 53/tcp

COPY docker-entrypoint.sh /docker-entrypoint.sh

# healthcheck (nslookup)
# healthcheck (dig, ping)
COPY healthcheck.sh /healthcheck.sh
COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
COPY supervisord.conf /etc/supervisor/supervisord.conf
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh

RUN chmod +x /healthcheck.sh
HEALTHCHECK --interval=5s --timeout=30s CMD [ "/healthcheck.sh" ]
HEALTHCHECK --interval=30s --timeout=10s \
CMD sh -c '[ -f /tmp/healthcheck_status ] && [ "$(cat /tmp/healthcheck_status)" -eq 0 ] || exit 1'

ENTRYPOINT ["/docker-entrypoint.sh"]

CMD ["/usr/sbin/unbound"]
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
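The new HEALTHCHECK no longer runs the checks itself; supervisord keeps healthcheck.sh running as a long-lived service, and the Docker probe only inspects the status file that service maintains. The contract, reduced to its two sides:

  # writer (looping healthcheck service): record the last verdict
  echo "0" > /tmp/healthcheck_status    # 0 = healthy, 1 = unhealthy

  # reader (Docker HEALTHCHECK CMD): healthy only if the file exists and holds 0
  [ -f /tmp/healthcheck_status ] && [ "$(cat /tmp/healthcheck_status)" -eq 0 ] || exit 1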
@@ -1,99 +1,102 @@
#!/bin/bash

# Skip Unbound (DNS Resolver) Healthchecks (NOT Recommended!)
if [[ "${SKIP_UNBOUND_HEALTHCHECK}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
SKIP_UNBOUND_HEALTHCHECK=y
fi
STATUS_FILE="/tmp/healthcheck_status"
RUNS=0

# Declare log function for logfile inside container
function log_to_file() {
echo "$(date +"%Y-%m-%d %H:%M:%S"): $1" > /var/log/healthcheck.log
# Declare log function for logfile to stdout
function log_to_stdout() {
echo "$(date +"%Y-%m-%d %H:%M:%S"): $1"
}

# General Ping function to check general pingability
function check_ping() {
declare -a ipstoping=("1.1.1.1" "8.8.8.8" "9.9.9.9")
declare -a ipstoping=("1.1.1.1" "8.8.8.8" "9.9.9.9")
local fail_tolerance=1
local failures=0

for ip in "${ipstoping[@]}" ; do
ping -q -c 3 -w 5 "$ip"
if [ $? -ne 0 ]; then
log_to_file "Healthcheck: Couldn't ping $ip for 5 seconds... Gave up!"
log_to_file "Please check your internet connection or firewall rules to fix this error, because a simple ping test should always go through from the unbound container!"
return 1
fi
for ip in "${ipstoping[@]}" ; do
success=false
for ((i=1; i<=3; i++)); do
ping -q -c 3 -w 5 "$ip" > /dev/null
if [ $? -eq 0 ]; then
success=true
break
else
log_to_stdout "Healthcheck: Failed to ping $ip on attempt $i. Trying again..."
fi
done

if [ "$success" = false ]; then
log_to_stdout "Healthcheck: Couldn't ping $ip after 3 attempts. Marking this IP as failed."
((failures++))
fi
done

if [ $failures -gt $fail_tolerance ]; then
log_to_stdout "Healthcheck: Too many ping failures ($fail_tolerance failures allowed, you got $failures failures), marking Healthcheck as unhealthy..."
return 1
fi

return 0

log_to_file "Healthcheck: Ping Checks WORKING properly!"
return 0
}

# General DNS Resolve Check against Unbound Resolver himself
function check_dns() {
declare -a domains=("mailcow.email" "github.com" "hub.docker.com")
declare -a domains=("fuzzy.mailcow.email" "github.com" "hub.docker.com")
local fail_tolerance=1
local failures=0

for domain in "${domains[@]}" ; do
for ((i=1; i<=3; i++)); do
dig +short +timeout=2 +tries=1 "$domain" @127.0.0.1 > /dev/null
if [ $? -ne 0 ]; then
log_to_file "Healthcheck: DNS Resolution Failed on $i attempt! Trying again..."
if [ $i -eq 3 ]; then
log_to_file "Healthcheck: DNS Resolution not possible after $i attempts... Gave up!"
log_to_file "Maybe check your outbound firewall, as it needs to resolve DNS over TCP AND UDP!"
return 1
fi
for domain in "${domains[@]}" ; do
success=false
for ((i=1; i<=3; i++)); do
dig_output=$(dig +short +timeout=2 +tries=1 "$domain" @127.0.0.1 2>/dev/null)
dig_rc=$?

if [ $dig_rc -ne 0 ] || [ -z "$dig_output" ]; then
log_to_stdout "Healthcheck: DNS Resolution Failed on attempt $i for $domain! Trying again..."
else
success=true
break
fi
done
done

log_to_file "Healthcheck: DNS Resolver WORKING properly!"
return 0

if [ "$success" = false ]; then
log_to_stdout "Healthcheck: DNS Resolution not possible after 3 attempts for $domain... Gave up!"
((failures++))
fi
done

if [ $failures -gt $fail_tolerance ]; then
log_to_stdout "Healthcheck: Too many DNS failures ($fail_tolerance failures allowed, you got $failures failures), marking Healthcheck as unhealthy..."
return 1
fi

return 0
}

# Simple Netcat Check to connect to common webports
function check_netcat() {
declare -a domains=("mailcow.email" "github.com" "hub.docker.com")
declare -a ports=("80" "443")
while true; do

for domain in "${domains[@]}" ; do
for port in "${ports[@]}" ; do
nc -z -w 2 $domain $port
if [ $? -ne 0 ]; then
log_to_file "Healthcheck: Could not reach $domain on Port $port... Gave up!"
log_to_file "Please check your internet connection or firewall rules to fix this error."
return 1
fi
done
done
if [[ ${SKIP_UNBOUND_HEALTHCHECK} == "y" ]]; then
log_to_stdout "Healthcheck: ALL CHECKS WERE SKIPPED! Unbound is healthy!"
echo "0" > $STATUS_FILE
sleep 365d
fi

log_to_file "Healthcheck: Netcat Checks WORKING properly!"
return 0
# run checks, if check is not returning 0 (return value if check is ok), healthcheck will exit with 1 (marked in docker as unhealthy)
check_ping
PING_STATUS=$?

}
check_dns
DNS_STATUS=$?

if [[ ${SKIP_UNBOUND_HEALTHCHECK} == "y" ]]; then
log_to_file "Healthcheck: ALL CHECKS WERE SKIPPED! Unbound is healthy!"
exit 0
fi
if [ $PING_STATUS -ne 0 ] || [ $DNS_STATUS -ne 0 ]; then
echo "1" > $STATUS_FILE

# run checks, if check is not returning 0 (return value if check is ok), healthcheck will exit with 1 (marked in docker as unhealthy)
check_ping
else
echo "0" > $STATUS_FILE
fi

if [ $? -ne 0 ]; then
exit 1
fi
sleep 30

check_dns

if [ $? -ne 0 ]; then
exit 1
fi

check_netcat

if [ $? -ne 0 ]; then
exit 1
fi

log_to_file "Healthcheck: ALL CHECKS WERE SUCCESSFUL! Unbound is healthy!"
exit 0
done
data/Dockerfiles/unbound/stop-supervisor.sh (new executable file, 10 lines)
@@ -0,0 +1,10 @@
#!/bin/bash

printf "READY\n";

while read line; do
echo "Processing Event: $line" >&2;
kill -3 $(cat "/var/run/supervisord.pid")
done < /dev/stdin

rm -rf /tmp/healthcheck_status
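stop-supervisor.sh is wired up below as a supervisord event listener: after the script prints READY, supervisord delivers one event header per state change on its stdin, and the script reacts to the first PROCESS_STATE_STOPPED/EXITED/FATAL event by sending SIGQUIT (kill -3) to supervisord, taking the container down. Printing READY once suffices here because the first event is terminal; for reference, a protocol-complete listener that acknowledges each event would look roughly like this (a sketch, with the header format as documented for supervisord, not the shipped script):

  while true; do
    printf "READY\n"              # signal: ready for the next event
    read -r header                # e.g. "ver:3.0 ... eventname:PROCESS_STATE_EXITED len:84"
    len=${header##*len:}          # payload length announced in the header
    head -c "${len}" > /dev/null  # consume the event payload
    printf "RESULT 2\nOK"         # acknowledge so supervisord can send the next event
  done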
data/Dockerfiles/unbound/supervisord.conf (new file, 32 lines)
@@ -0,0 +1,32 @@
[supervisord]
nodaemon=true
user=root
pidfile=/var/run/supervisord.pid

[program:syslog-ng]
command=/usr/sbin/syslog-ng --foreground --no-caps
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autostart=true

[program:unbound]
command=/usr/sbin/unbound
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=true

[program:unbound-healthcheck]
command=/bin/bash /healthcheck.sh
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autorestart=true

[eventlistener:processes]
command=/usr/local/sbin/stop-supervisor.sh
events=PROCESS_STATE_STOPPED, PROCESS_STATE_EXITED, PROCESS_STATE_FATAL
data/Dockerfiles/unbound/syslog-ng.conf (new file, 21 lines)
@@ -0,0 +1,21 @@
@version: 4.5
@include "scl.conf"
options {
chain_hostnames(off);
flush_lines(0);
use_dns(no);
use_fqdn(no);
owner("root"); group("adm"); perm(0640);
stats(freq(0));
keep_timestamp(no);
bad_hostname("^gconfd$");
};
source s_dgram {
unix-dgram("/dev/log");
internal();
};
destination d_stdout { pipe("/dev/stdout"); };
log {
source(s_dgram);
destination(d_stdout);
};
@@ -1,5 +1,6 @@
FROM alpine:3.18
LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
FROM alpine:3.21

LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"

# Installation
RUN apk add --update \
@@ -33,7 +34,7 @@ RUN apk add --update \
&& curl https://raw.githubusercontent.com/mludvig/smtp-cli/v3.10/smtp-cli -o /smtp-cli \
&& chmod +x smtp-cli

COPY watchdog.sh /watchdog.sh
COPY check_mysql_slavestatus.sh /usr/lib/nagios/plugins/check_mysql_slavestatus.sh
COPY data/Dockerfiles/watchdog/watchdog.sh /watchdog.sh
COPY data/Dockerfiles/watchdog/check_mysql_slavestatus.sh /usr/lib/nagios/plugins/check_mysql_slavestatus.sh

CMD /watchdog.sh
CMD ["/watchdog.sh"]
@@ -49,7 +49,7 @@
# 2013101601 Optical clean up #
# 2013101602 Rewrite help output #
# 2013101700 Handle Slave IO in 'Connecting' state #
# 2013101701 Minor changes in output, handling UNKWNON situations now #
# 2013101701 Minor changes in output, handling UNKNOWN situations now #
# 2013101702 Exit CRITICAL when Slave IO in Connecting state #
# 2013123000 Slave_SQL_Running also matched Slave_SQL_Running_State #
# 2015011600 Added 'moving' check to catch possible connection issues #
@@ -131,10 +131,10 @@ elif [[ -n "${socket}" && (-z "${user}" || -z "${password}") ]]; then
fi

# Connect to the DB server and store output in vars
if [[ -n $socket ]]; then
ConnectionResult=$(mysql ${optfile} ${socket} ${user} -e "show slave ${connection} status\G" 2>&1)
if [[ -n $socket ]]; then
ConnectionResult=$(mariadb --skip-ssl ${optfile} ${socket} ${user} -e "show slave ${connection} status\G" 2>&1)
else
ConnectionResult=$(mysql ${optfile} ${host} ${port} ${user} -e "show slave ${connection} status\G" 2>&1)
ConnectionResult=$(mariadb --skip-ssl ${optfile} ${host} ${port} ${user} -e "show slave ${connection} status\G" 2>&1)
fi

if [ -z "`echo "${ConnectionResult}" |grep Slave_IO_State`" ]; then
@@ -178,33 +178,33 @@ if [ ${check} = ${ok} ] && [ ${checkio} = ${ok} ]; then
then echo "CRITICAL: Slave is ${delayinfo} seconds behind Master | delay=${delayinfo}s"; exit ${STATE_CRITICAL}
elif [[ ${delayinfo} -ge ${warn_delay} ]]
then echo "WARNING: Slave is ${delayinfo} seconds behind Master | delay=${delayinfo}s"; exit ${STATE_WARNING}
else
else
# Everything looks OK here but now let us check if the replication is moving
if [[ -n ${moving} ]] && [[ -n ${tmpfile} ]] && [[ $readpos -eq $execpos ]]
then
#echo "Debug: Read pos is $readpos - Exec pos is $execpos"
then
#echo "Debug: Read pos is $readpos - Exec pos is $execpos"
# Check if tmp file exists
curtime=`date +%s`
if [[ -w $tmpfile ]]
then
if [[ -w $tmpfile ]]
then
tmpfiletime=`date +%s -r $tmpfile`
if [[ `expr $curtime - $tmpfiletime` -gt ${moving} ]]
then
exectmp=`cat $tmpfile`
#echo "Debug: Exec pos in tmpfile is $exectmp"
if [[ $exectmp -eq $execpos ]]
then
then
# The value read from the tmp file and from db are the same. Replication hasnt moved!
echo "WARNING: Slave replication has not moved in ${moving} seconds. Manual check required."; exit ${STATE_WARNING}
else
else
# Replication has moved since the tmp file was written. Delete tmp file and output OK.
rm $tmpfile
echo "OK: Slave SQL running: ${check} Slave IO running: ${checkio} / master: ${masterinfo} / slave is ${delayinfo} seconds behind master | delay=${delayinfo}s"; exit ${STATE_OK};
fi
else
else
echo "OK: Slave SQL running: ${check} Slave IO running: ${checkio} / master: ${masterinfo} / slave is ${delayinfo} seconds behind master | delay=${delayinfo}s"; exit ${STATE_OK};
fi
else
else
echo "$execpos" > $tmpfile
echo "OK: Slave SQL running: ${check} Slave IO running: ${checkio} / master: ${masterinfo} / slave is ${delayinfo} seconds behind master | delay=${delayinfo}s"; exit ${STATE_OK};
fi
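The substantive change in these hunks is the client invocation: `mysql` becomes `mariadb --skip-ssl`, matching newer MariaDB releases where the mysql-named client is deprecated and the client attempts TLS by default; --skip-ssl keeps the plain connection over the local socket working. The raw output the plugin parses can be reproduced manually (socket path and credentials below are assumptions for illustration):

  # \G prints one field per line; the plugin greps fields such as Slave_IO_State
  mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u "${DBUSER}" -p"${DBPASS}" \
    -e 'show slave status\G' | grep -E 'Slave_(IO|SQL)_Running|Seconds_Behind_Master'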
@@ -33,16 +33,16 @@ if [[ ! -p /tmp/com_pipe ]]; then
fi

# Wait for containers
while ! mysqladmin status --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
echo "Waiting for SQL..."
sleep 2
done

# Do not attempt to write to slave
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT}"
REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning"
else
REDIS_CMDLINE="redis-cli -h redis -p 6379"
REDIS_CMDLINE="redis-cli -h redis -p 6379 -a ${REDISPASS} --no-auth-warning"
fi

until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
@@ -169,9 +169,13 @@ function notify_error() {
return 1
fi

# Escape subject and body (https://stackoverflow.com/a/2705678)
ESCAPED_SUBJECT=$(echo ${SUBJECT} | sed -e 's/[\/&]/\\&/g')
ESCAPED_BODY=$(echo ${BODY} | sed -e 's/[\/&]/\\&/g')

# Replace subject and body placeholders
WEBHOOK_BODY=$(echo ${WATCHDOG_NOTIFY_WEBHOOK_BODY} | sed "s/\$SUBJECT\|\${SUBJECT}/$SUBJECT/g" | sed "s/\$BODY\|\${BODY}/$BODY/g")

WEBHOOK_BODY=$(echo ${WATCHDOG_NOTIFY_WEBHOOK_BODY} | sed -e "s/\$SUBJECT\|\${SUBJECT}/$ESCAPED_SUBJECT/g" -e "s/\$BODY\|\${BODY}/$ESCAPED_BODY/g")

# POST to webhook
curl -X POST -H "Content-Type: application/json" ${CURL_VERBOSE} -d "${WEBHOOK_BODY}" ${WATCHDOG_NOTIFY_WEBHOOK}
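The escaping added in this hunk matters because, in a sed replacement string, an unescaped & re-inserts the whole match and an unescaped / terminates the expression. A minimal reproduction of the failure the patch prevents:

  SUBJECT='disk 90% & rising /var'
  # unescaped: sed errors out on the '/' (and '&' would duplicate the match):
  #   echo 'alert: $SUBJECT' | sed "s/\$SUBJECT/${SUBJECT}/g"
  ESCAPED=$(echo "${SUBJECT}" | sed -e 's/[\/&]/\\&/g')
  echo 'alert: $SUBJECT' | sed "s/\$SUBJECT/${ESCAPED}/g"   # -> alert: disk 90% & rising /var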
@@ -191,12 +195,12 @@ get_container_ip() {
else
sleep 0.5
# get long container id for exact match
CONTAINER_ID=($(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring == \"${1}\") | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id"))
CONTAINER_ID=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring == \"${1}\") | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id"))
# returned id can have multiple elements (if scaled), shuffle for random test
CONTAINER_ID=($(printf "%s\n" "${CONTAINER_ID[@]}" | shuf))
if [[ ! -z ${CONTAINER_ID} ]]; then
for matched_container in "${CONTAINER_ID[@]}"; do
CONTAINER_IPS=($(curl --silent --insecure https://dockerapi/containers/${matched_container}/json | jq -r '.NetworkSettings.Networks[].IPAddress'))
CONTAINER_IPS=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${matched_container}/json | jq -r '.NetworkSettings.Networks[].IPAddress'))
for ip_match in "${CONTAINER_IPS[@]}"; do
# grep will do nothing if one of these vars is empty
[[ -z ${ip_match} ]] && continue
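These hunks only swap the bare dockerapi hostname for its fully qualified alias on the compose network; the jq filters still match the project via ${COMPOSE_PROJECT_NAME,,}, bash's built-in lowercasing expansion, since compose normalizes project names to lowercase in its labels:

  COMPOSE_PROJECT_NAME=MailcowDockerized
  echo "${COMPOSE_PROJECT_NAME,,}"   # bash 4+ lowercase expansion: mailcowdockerized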
@@ -230,7 +234,7 @@ external_checks() {
diff_c=0
THRESHOLD=${EXTERNAL_CHECKS_THRESHOLD}
# Reduce error count by 2 after restarting an unhealthy container
GUID=$(mysql -u${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'GUID'" -BN)
GUID=$(mariadb --skip-ssl -u${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'GUID'" -BN)
trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
while [ ${err_count} -lt ${THRESHOLD} ]; do
err_c_cur=${err_count}
@@ -326,7 +330,7 @@ redis_checks() {
touch /tmp/redis-mailcow; echo "$(tail -50 /tmp/redis-mailcow)" > /tmp/redis-mailcow
host_ip=$(get_container_ip redis-mailcow)
err_c_cur=${err_count}
/usr/lib/nagios/plugins/check_tcp -4 -H redis-mailcow -p 6379 -E -s "PING\n" -q "QUIT" -e "PONG" 2>> /tmp/redis-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
/usr/lib/nagios/plugins/check_tcp -4 -H redis-mailcow -p 6379 -E -s "AUTH ${REDISPASS}\nPING\n" -q "QUIT" -e "PONG" 2>> /tmp/redis-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
[ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
[ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
progress "Redis" ${THRESHOLD} $(( ${THRESHOLD} - ${err_count} )) ${diff_c}
@@ -398,7 +402,7 @@ sogo_checks() {
trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
while [ ${err_count} -lt ${THRESHOLD} ]; do
touch /tmp/sogo-mailcow; echo "$(tail -50 /tmp/sogo-mailcow)" > /tmp/sogo-mailcow
host_ip=$(get_container_ip sogo-mailcow)
host_ip=$SOGO_HOST
err_c_cur=${err_count}
/usr/lib/nagios/plugins/check_http -4 -H ${host_ip} -u /SOGo.index/ -p 20000 2>> /tmp/sogo-mailcow 1>&2; err_count=$(( ${err_count} + $? ))
[ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
@@ -499,12 +503,12 @@ dovecot_repl_checks() {
err_count=0
diff_c=0
THRESHOLD=${DOVECOT_REPL_THRESHOLD}
D_REPL_STATUS=$(redis-cli -h redis -r GET DOVECOT_REPL_HEALTH)
D_REPL_STATUS=$(redis-cli -h redis -a ${REDISPASS} --no-auth-warning -r GET DOVECOT_REPL_HEALTH)
# Reduce error count by 2 after restarting an unhealthy container
trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
while [ ${err_count} -lt ${THRESHOLD} ]; do
err_c_cur=${err_count}
D_REPL_STATUS=$(redis-cli --raw -h redis GET DOVECOT_REPL_HEALTH)
D_REPL_STATUS=$(redis-cli --raw -h redis -a ${REDISPASS} --no-auth-warning GET DOVECOT_REPL_HEALTH)
if [[ "${D_REPL_STATUS}" != "1" ]]; then
err_count=$(( ${err_count} + 1 ))
fi
@@ -574,19 +578,19 @@ ratelimit_checks() {
err_count=0
diff_c=0
THRESHOLD=${RATELIMIT_THRESHOLD}
RL_LOG_STATUS=$(redis-cli -h redis LRANGE RL_LOG 0 0 | jq .qid)
RL_LOG_STATUS=$(redis-cli -h redis -a ${REDISPASS} --no-auth-warning LRANGE RL_LOG 0 0 | jq .qid)
# Reduce error count by 2 after restarting an unhealthy container
trap "[ ${err_count} -gt 1 ] && err_count=$(( ${err_count} - 2 ))" USR1
while [ ${err_count} -lt ${THRESHOLD} ]; do
err_c_cur=${err_count}
RL_LOG_STATUS_PREV=${RL_LOG_STATUS}
RL_LOG_STATUS=$(redis-cli -h redis LRANGE RL_LOG 0 0 | jq .qid)
RL_LOG_STATUS=$(redis-cli -h redis -a ${REDISPASS} --no-auth-warning LRANGE RL_LOG 0 0 | jq .qid)
if [[ ${RL_LOG_STATUS_PREV} != ${RL_LOG_STATUS} ]]; then
err_count=$(( ${err_count} + 1 ))
echo 'Last 10 applied ratelimits (may overlap with previous reports).' > /tmp/ratelimit
echo 'Full ratelimit buckets can be emptied by deleting the ratelimit hash from within mailcow UI (see /debug -> Protocols -> Ratelimit):' >> /tmp/ratelimit
echo >> /tmp/ratelimit
redis-cli --raw -h redis LRANGE RL_LOG 0 10 | jq . >> /tmp/ratelimit
redis-cli --raw -h redis -a ${REDISPASS} --no-auth-warning LRANGE RL_LOG 0 10 | jq . >> /tmp/ratelimit
fi
[ ${err_c_cur} -eq ${err_count} ] && [ ! $((${err_count} - 1)) -lt 0 ] && err_count=$((${err_count} - 1)) diff_c=1
[ ${err_c_cur} -ne ${err_count} ] && diff_c=$(( ${err_c_cur} - ${err_count} ))
@@ -669,7 +673,7 @@ acme_checks() {
err_count=0
diff_c=0
THRESHOLD=${ACME_THRESHOLD}
ACME_LOG_STATUS=$(redis-cli -h redis GET ACME_FAIL_TIME)
ACME_LOG_STATUS=$(redis-cli -h redis -a ${REDISPASS} --no-auth-warning GET ACME_FAIL_TIME)
if [[ -z "${ACME_LOG_STATUS}" ]]; then
${REDIS_CMDLINE} SET ACME_FAIL_TIME 0
ACME_LOG_STATUS=0
@@ -681,7 +685,7 @@ acme_checks() {
ACME_LOG_STATUS_PREV=${ACME_LOG_STATUS}
ACME_LC=0
until [[ ! -z ${ACME_LOG_STATUS} ]] || [ ${ACME_LC} -ge 3 ]; do
ACME_LOG_STATUS=$(redis-cli -h redis GET ACME_FAIL_TIME 2> /dev/null)
ACME_LOG_STATUS=$(redis-cli -h redis -a ${REDISPASS} --no-auth-warning GET ACME_FAIL_TIME 2> /dev/null)
sleep 3
ACME_LC=$((ACME_LC+1))
done
@@ -716,7 +720,7 @@ rspamd_checks() {
From: watchdog@localhost

Empty
' | usr/bin/curl --max-time 10 -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd/scan | jq -rc .default.required_score | sed 's/\..*//' )
' | usr/bin/curl --max-time 10 -s --data-binary @- --unix-socket /var/lib/rspamd/rspamd.sock http://rspamd.${COMPOSE_PROJECT_NAME}_mailcow-network/scan | jq -rc .default.required_score | sed 's/\..*//' )
if [[ ${SCORE} -ne 9999 ]]; then
echo "Rspamd settings check failed, score returned: ${SCORE}" 2>> /tmp/rspamd-mailcow 1>&2
err_count=$(( ${err_count} + 1))
@@ -990,6 +994,7 @@ PID=$!
echo "Spawned cert_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})

if [[ "${SKIP_OLEFY}" =~ ^([nN][oO]|[nN])+$ ]]; then
(
while true; do
if ! olefy_checks; then
@@ -1001,6 +1006,7 @@ done
PID=$!
echo "Spawned olefy_checks with PID ${PID}"
BACKGROUND_TASKS+=(${PID})
fi

(
while true; do
@@ -1095,12 +1101,12 @@ while true; do
elif [[ ${com_pipe_answer} =~ .+-mailcow ]]; then
kill -STOP ${BACKGROUND_TASKS[*]}
sleep 10
CONTAINER_ID=$(curl --silent --insecure https://dockerapi/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"${com_pipe_answer}\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id")
CONTAINER_ID=$(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"${com_pipe_answer}\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id")
if [[ ! -z ${CONTAINER_ID} ]]; then
if [[ "${com_pipe_answer}" == "php-fpm-mailcow" ]]; then
HAS_INITDB=$(curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/top | jq '.msg.Processes[] | contains(["php -c /usr/local/etc/php -f /web/inc/init_db.inc.php"])' | grep true)
HAS_INITDB=$(curl --silent --insecure -XPOST https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/top | jq '.msg.Processes[] | contains(["php -c /usr/local/etc/php -f /web/inc/init_db.inc.php"])' | grep true)
fi
S_RUNNING=$(($(date +%s) - $(curl --silent --insecure https://dockerapi/containers/${CONTAINER_ID}/json | jq .State.StartedAt | xargs -n1 date +%s -d)))
S_RUNNING=$(($(date +%s) - $(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/json | jq .State.StartedAt | xargs -n1 date +%s -d)))
if [ ${S_RUNNING} -lt 360 ]; then
log_msg "Container is running for less than 360 seconds, skipping action..."
elif [[ ! -z ${HAS_INITDB} ]]; then
@@ -1108,7 +1114,7 @@ while true; do
sleep 60
else
log_msg "Sending restart command to ${CONTAINER_ID}..."
curl --silent --insecure -XPOST https://dockerapi/containers/${CONTAINER_ID}/restart
curl --silent --insecure -XPOST https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/restart
notify_error "${com_pipe_answer}"
log_msg "Wait for restarted container to settle and continue watching..."
sleep 35
@@ -1,130 +0,0 @@
map $http_x_forwarded_proto $client_req_scheme_nc {
default $scheme;
https https;
}

server {
include /etc/nginx/conf.d/listen_ssl.active;
include /etc/nginx/conf.d/listen_plain.active;
include /etc/nginx/mime.types;
charset utf-8;
override_charset on;

ssl_certificate /etc/ssl/mail/cert.pem;
ssl_certificate_key /etc/ssl/mail/key.pem;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers on;
ssl_ciphers ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305;
ssl_ecdh_curve X25519:X448:secp384r1:secp256k1;
ssl_session_cache shared:SSL:50m;
ssl_session_timeout 1d;
ssl_session_tickets off;
add_header Referrer-Policy "no-referrer" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Download-Options "noopen" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Permitted-Cross-Domain-Policies "none" always;
add_header X-Robots-Tag "noindex, nofollow" always;
add_header X-XSS-Protection "1; mode=block" always;

fastcgi_hide_header X-Powered-By;

server_name NC_SUBD;

root /web/nextcloud/;

location = /robots.txt {
allow all;
log_not_found off;
access_log off;
}

location = /.well-known/carddav {
return 301 $client_req_scheme_nc://$host/remote.php/dav;
}

location = /.well-known/caldav {
return 301 $client_req_scheme_nc://$host/remote.php/dav;
}

location = /.well-known/webfinger {
return 301 $client_req_scheme_nc://$host/index.php/.well-known/webfinger;
}

location = /.well-known/nodeinfo {
return 301 $client_req_scheme_nc://$host/index.php/.well-known/nodeinfo;
}

location ^~ /.well-known/acme-challenge/ {
default_type "text/plain";
root /web;
}

fastcgi_buffers 64 4K;

gzip on;
gzip_vary on;
gzip_comp_level 4;
gzip_min_length 256;
gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;
set_real_ip_from fc00::/7;
set_real_ip_from 10.0.0.0/8;
set_real_ip_from 172.16.0.0/12;
set_real_ip_from 192.168.0.0/16;
real_ip_header X-Forwarded-For;
real_ip_recursive on;

location / {
rewrite ^ /index.php$uri;
}

location ~ ^\/(?:build|tests|config|lib|3rdparty|templates|data)\/ {
deny all;
}
location ~ ^\/(?:\.|autotest|occ|issue|indie|db_|console) {
deny all;
}

location ~ ^\/(?:index|remote|public|cron|core\/ajax\/update|status|ocs\/v[12]|updater\/.+|ocs-provider\/.+)\.php(?:$|\/) {
fastcgi_split_path_info ^(.+?\.php)(\/.*|)$;
set $path_info $fastcgi_path_info;
try_files $fastcgi_script_name =404;
include fastcgi_params;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param PATH_INFO $path_info;
fastcgi_param HTTPS on;
# Avoid sending the security headers twice
fastcgi_param modHeadersAvailable true;
# Enable pretty urls
fastcgi_param front_controller_active true;
fastcgi_pass phpfpm:9002;
fastcgi_intercept_errors on;
fastcgi_request_buffering off;
client_max_body_size 0;
fastcgi_read_timeout 1200;
}

location ~ ^\/(?:updater|ocs-provider)(?:$|\/) {
try_files $uri/ =404;
index index.php;
}

location ~ \.(?:css|js|woff2?|svg|gif|map)$ {
try_files $uri /index.php$request_uri;
add_header Cache-Control "public, max-age=15778463";
add_header Referrer-Policy "no-referrer" always;
add_header X-Content-Type-Options "nosniff" always;
add_header X-Download-Options "noopen" always;
add_header X-Frame-Options "SAMEORIGIN" always;
add_header X-Permitted-Cross-Domain-Policies "none" always;
add_header X-Robots-Tag "none" always;
add_header X-XSS-Protection "1; mode=block" always;
access_log off;
}

location ~ \.(?:png|html|ttf|ico|jpg|jpeg|bcmap)$ {
try_files $uri /index.php$request_uri;
access_log off;
}
}
@@ -1,2 +0,0 @@
#!/bin/bash
docker exec -it -u www-data $(docker ps -f name=php-fpm-mailcow -q) php /web/nextcloud/occ ${@}
@@ -1,8 +1,8 @@
-----BEGIN DH PARAMETERS-----
MIIBCAKCAQEA9iHB0CRDhV8wfBgqnmvuJpl0fzL3qL75R4ZvQHlfMNLrxuIz2x9D
9zcDhPcBTVzV5Ay0AAkke4wP6r6wDQqXqBP4Y8IOkYAyLh3jM40jfHQzQt+5JdQl
ond3kiscBsFOch/vMfSLMu3lAb0YhPNTvrxhMz7LcVAWYl82swASupdiKR+MgaQr
XsugpmDKsHW60VmIM9B7K9Y+rNHwvMWkmISd0KxA8oOy1WJvsVEissMALZDE3c4w
2xHmO2lXxgEx3aez28736t4m/KW3g9Zr31a1M0KusmfY//fGkPk4NUrLBOS2xrgp
Y/rG1qSBdcVyerM0Ki93qCyHKYu4ene0OwIBAg==
MIIBCAKCAQEA//////////+t+FRYortKmq/cViAnPTzx2LnFg84tNpWp4TZBFGQz
+8yTnc4kmz75fS/jY2MMddj2gbICrsRhetPfHtXV/WVhJDP1H18GbtCFY2VVPe0a
87VXE15/V8k1mE8McODmi3fipona8+/och3xWKE2rec1MKzKT0g6eXq8CrGCsyT7
YdEIqUuyyOP7uWrat2DX9GgdT0Kj3jlN9K5W7edjcrsZCwenyO4KbXCeAvzhzffi
7MA0BM0oNC9hkXL+nOmFg/+OTxIy7vKBg8P+OxtMb61zO7X8vC7CIAXFjvGDfRaD
ssbzSibBsu/6iGtCOGEoXJf//////////wIBAg==
-----END DH PARAMETERS-----
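The old, locally generated parameters are replaced by what appears to be the RFC 7919 ffdhe2048 group: the decoded prime begins FFFFFFFF FFFFFFFF ADF85458 A2BB4A9A, matching that RFC's published constant, which makes the group independently verifiable instead of opaque. Either PEM file can be inspected with openssl (file path below is an assumption):

  # Prints prime length, generator and prime bytes for comparison with RFC 7919
  openssl dhparam -in dhparams.pem -text -noout | head -n 5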
Some files were not shown because too many files have changed in this diff.