Mirror of https://github.com/mailcow/mailcow-dockerized.git (synced 2026-02-15 21:10:04 +00:00)

Compare commits: feat/pytho... → nightly (343 commits)
[Commit table not captured by the mirror view: 343 commits are listed by abbreviated SHA1 only (newest c0be3347f8 ... oldest 8caf09cd80); the Author and Date columns were empty in the capture.]
.github/ISSUE_TEMPLATE/Bug_report.yml (vendored, 69 changes)
@@ -11,22 +11,35 @@ body:
          required: true
  - type: checkboxes
    attributes:
-     label: I've found a bug and checked that ...
-     description: Prior to placing the issue, please check following:** *(fill out each checkbox with an `X` once done)*
+     label: Checklist prior issue creation
+     description: Prior to creating the issue...
      options:
-       - label: ... I understand that not following the below instructions will result in immediate closure and/or deletion of my issue.
+       - label: I understand that failure to follow below instructions may cause this issue to be closed.
          required: true
-       - label: ... I have understood that this bug report is dedicated for bugs, and not for support-related inquiries.
+       - label: I understand that vague, incomplete or inaccurate information may cause this issue to be closed.
          required: true
-       - label: ... I have understood that answers are voluntary and community-driven, and not commercial support.
+       - label: I understand that this form is intended solely for reporting software bugs and not for support-related inquiries.
          required: true
-       - label: ... I have verified that my issue has not been already answered in the past. I also checked previous [issues](https://github.com/mailcow/mailcow-dockerized/issues).
+       - label: I understand that all responses are voluntary and community-driven, and do not constitute commercial support.
          required: true
+       - label: I confirm that I have reviewed previous [issues](https://github.com/mailcow/mailcow-dockerized/issues) to ensure this matter has not already been addressed.
+         required: true
+       - label: I confirm that my environment meets all [prerequisite requirements](https://docs.mailcow.email/getstarted/prerequisite-system/) as specified in the official documentation.
+         required: true
  - type: textarea
    attributes:
      label: Description
-     description: Please provide a brief description of the bug in 1-2 sentences. If applicable, add screenshots to help explain your problem. Very useful for bugs in mailcow UI.
-     render: plain text
+     description: Please provide a brief description of the bug. If applicable, add screenshots to help explain your problem. (Very useful for bugs in mailcow UI.)
    validations:
      required: true
+ - type: textarea
+   attributes:
+     label: "Steps to reproduce:"
+     description: "Please describe the steps to reproduce the bug. Screenshots can be added, if helpful."
+     placeholder: |-
+       1. ...
+       2. ...
+       3. ...
+   validations:
+     required: true
  - type: textarea
@@ -36,45 +49,36 @@ body:
      render: plain text
    validations:
      required: true
- - type: textarea
-   attributes:
-     label: "Steps to reproduce:"
-     description: "Please describe the steps to reproduce the bug. Screenshots can be added, if helpful."
-     render: plain text
-     placeholder: |-
-       1. ...
-       2. ...
-       3. ...
-   validations:
-     required: true
  - type: markdown
    attributes:
      value: |
        ## System information
-       ### In this stage we would kindly ask you to attach general system information about your setup.
+       In this stage we would kindly ask you to attach general system information about your setup.
  - type: dropdown
    attributes:
      label: "Which branch are you using?"
-     description: "#### `git rev-parse --abbrev-ref HEAD`"
+     description: "#### Run: `git rev-parse --abbrev-ref HEAD`"
      multiple: false
      options:
-       - master
+       - master (stable)
        - staging
        - nightly
    validations:
      required: true
  - type: dropdown
    attributes:
      label: "Which architecture are you using?"
-     description: "#### `uname -m`"
+     description: "#### Run: `uname -m`"
      multiple: false
      options:
-       - x86
+       - x86_64
        - ARM64 (aarch64)
    validations:
      required: true
  - type: input
    attributes:
      label: "Operating System:"
      description: "#### Run: `lsb_release -ds`"
      placeholder: "e.g. Ubuntu 22.04 LTS"
    validations:
      required: true
@@ -93,43 +97,44 @@ body:
  - type: input
    attributes:
      label: "Virtualization technology:"
-     placeholder: "KVM, VMware, Xen, etc - **LXC and OpenVZ are not supported**"
+     description: "LXC and OpenVZ are not supported!"
+     placeholder: "KVM, VMware ESXi, Xen, etc"
    validations:
      required: true
  - type: input
    attributes:
      label: "Docker version:"
-     description: "#### `docker version`"
+     description: "#### Run: `docker version`"
      placeholder: "20.10.21"
    validations:
      required: true
  - type: input
    attributes:
      label: "docker-compose version or docker compose version:"
-     description: "#### `docker-compose version` or `docker compose version`"
+     description: "#### Run: `docker-compose version` or `docker compose version`"
      placeholder: "v2.12.2"
    validations:
      required: true
  - type: input
    attributes:
      label: "mailcow version:"
-     description: "#### ```git describe --tags `git rev-list --tags --max-count=1` ```"
-     placeholder: "2022-08"
+     description: "#### Run: ```git describe --tags `git rev-list --tags --max-count=1` ```"
+     placeholder: "2022-08x"
    validations:
      required: true
  - type: input
    attributes:
      label: "Reverse proxy:"
-     placeholder: "e.g. Nginx/Traefik"
+     placeholder: "e.g. nginx/Traefik, or none"
    validations:
      required: true
  - type: textarea
    attributes:
      label: "Logs of git diff:"
-     description: "#### Output of `git diff origin/master`, any other changes to the code? If so, **please post them**:"
+     description: "#### Output of `git diff origin/master`, any other changes to the code? Sanitize if needed. If so, **please post them**:"
      render: plain text
    validations:
-     required: true
+     required: false
  - type: textarea
    attributes:
      label: "Logs of iptables -L -vn:"
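For convenience, the shell commands the updated form asks a reporter to run, collected from the field descriptions above (a plain summary, not part of the diff itself):

# Gather the system information requested by the bug report form:
git rev-parse --abbrev-ref HEAD                            # branch
uname -m                                                   # architecture
lsb_release -ds                                            # operating system
docker version                                             # Docker version
docker-compose version || docker compose version           # Compose version
git describe --tags `git rev-list --tags --max-count=1`    # mailcow version
git diff origin/master                                     # local changes (sanitize if needed)
iptables -L -vn                                            # firewall rules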
[file header not captured by the mirror view]
@@ -14,7 +14,7 @@ jobs:
      pull-requests: write
    steps:
      - name: Mark/Close Stale Issues and Pull Requests 🗑️
-       uses: actions/stale@v9.1.0
+       uses: actions/stale@v10.1.1
        with:
          repo-token: ${{ secrets.STALE_ACTION_PAT }}
          days-before-stale: 60
.github/workflows/image_builds.yml (vendored, 4 changes)
@@ -15,7 +15,7 @@ jobs:
        images:
          - "acme-mailcow"
          - "clamd-mailcow"
-         - "dockerapi-mailcow"
+         - "controller-mailcow"
          - "dovecot-mailcow"
          - "netfilter-mailcow"
          - "olefy-mailcow"
@@ -27,7 +27,7 @@ jobs:
          - "watchdog-mailcow"
    runs-on: ubuntu-latest
    steps:
-     - uses: actions/checkout@v4
+     - uses: actions/checkout@v6
      - name: Setup Docker
        run: |
          curl -sSL https://get.docker.com/ | CHANNEL=stable sudo sh
.github/workflows/pr_to_nightly.yml (vendored, 4 changes)
@@ -8,11 +8,11 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6
        with:
          fetch-depth: 0
      - name: Run the Action
-       uses: devops-infra/action-pull-request@v0.6.0
+       uses: devops-infra/action-pull-request@v1.0.2
        with:
          github_token: ${{ secrets.PRTONIGHTLY_ACTION_PAT }}
          title: Automatic PR to nightly from ${{ github.event.repository.updated_at}}
.github/workflows/rebuild_backup_image.yml (vendored, 2 changes)
@@ -13,7 +13,7 @@ jobs:
      packages: write
    steps:
      - name: Checkout
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
[file header not captured by the mirror view]
@@ -15,14 +15,14 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
-       uses: actions/checkout@v4
+       uses: actions/checkout@v6

      - name: Generate postscreen_access.cidr
        run: |
          bash helper-scripts/update_postscreen_whitelist.sh

      - name: Create Pull Request
-       uses: peter-evans/create-pull-request@v7
+       uses: peter-evans/create-pull-request@v8
        with:
          token: ${{ secrets.mailcow_action_Update_postscreen_access_cidr_pat }}
          commit-message: update postscreen_access.cidr
.gitignore (vendored, 45 changes)
@@ -1,3 +1,6 @@
+!data/conf/nginx/dynmaps.conf
+!data/conf/nginx/meta_exporter.conf
+!data/conf/nginx/site.conf
!/**/.gitkeep
*.iml
.idea
@@ -5,33 +8,52 @@
data/assets/ssl-example/*
data/assets/ssl/*
data/conf/borgmatic/
data/conf/clamav/rendered_configs
data/conf/clamav/whitelist.ign2
data/conf/dovecot/acl_anyone
data/conf/dovecot/dovecot-master.passwd
data/conf/dovecot/dovecot-master.userdb
data/conf/dovecot/extra.conf
data/conf/dovecot/rendered_configs
data/conf/nginx/rendered_configs
data/conf/dovecot/mail_replica.conf
data/conf/dovecot/global_sieve_*
data/conf/dovecot/last_login
data/conf/dovecot/lua
data/conf/dovecot/mail_plugins*
data/conf/dovecot/shared_namespace.conf
data/conf/dovecot/sni.conf
data/conf/dovecot/sogo-sso.conf
data/conf/dovecot/sogo_trusted_ip.conf
data/conf/dovecot/sql
data/conf/dovecot/conf.d/fts.conf
data/conf/nextcloud-*.bak
data/conf/nginx/*.active
data/conf/nginx/*.bak
data/conf/nginx/*.conf
data/conf/nginx/*.custom
data/conf/phpfpm/sogo-sso/sogo-sso.pass
data/conf/phpfpm/rendered_configs
data/conf/portainer/
data/conf/postfix/allow_mailcow_local.regexp
data/conf/postfix/custom_postscreen_whitelist.cidr
data/conf/postfix/custom_transport.pcre
data/conf/postfix/extra.cf
data/conf/postfix/rendered_configs
data/conf/postfix/sni.map
data/conf/postfix/sni.map.db
data/conf/postfix/sql
data/conf/postfix/dns_blocklists.cf
data/conf/postfix/dnsbl_reply.map
data/conf/rspamd/custom/*
data/conf/rspamd/local.d/*
data/conf/rspamd/override.d/*
data/conf/rspamd/rendered_configs
data/conf/sogo/custom-theme.js
data/conf/sogo/plist_ldap
data/conf/sogo/plist_ldap.sh
data/conf/sogo/sieve.creds
data/conf/sogo/cron.creds
data/conf/sogo/custom-fulllogo.svg
data/conf/sogo/custom-shortlogo.svg
data/conf/sogo/custom-fulllogo.png
data/conf/sogo/rendered_configs
data/conf/mysql/rendered_configs
data/gitea/
data/gogs/
data/hooks/clamd/*
data/hooks/dovecot/*
data/hooks/mariadb/*
data/hooks/nginx/*
data/hooks/phpfpm/*
data/hooks/postfix/*
data/hooks/rspamd/*
@@ -53,3 +75,4 @@ refresh_images.sh
update_diffs/
create_cold_standby.sh
!data/conf/nginx/mailcow_auth.conf
+data/conf/postfix/postfix-tlspol
[file header not captured by the mirror view]
@@ -1,11 +1,11 @@
# Contribution Guidelines
-**_Last modified on 15th August 2024_**
+**_Last modified on 12th November 2025_**

First of all, thank you for wanting to provide a bugfix or a new feature for the mailcow community, it's because of your help that the project can continue to grow!

As we want to keep mailcow's development structured we setup these Guidelines which helps you to create your issue/pull request accordingly.

-**PLEASE NOTE, THAT WE MIGHT CLOSE ISSUES/PULL REQUESTS IF THEY DON'T FULLFIL OUR WRITTEN GUIDELINES WRITTEN INSIDE THIS DOCUMENT**. So please check this guidelines before you propose a Issue/Pull Request.
+**PLEASE NOTE, THAT WE WILL CLOSE ISSUES/PULL REQUESTS IF THEY DON'T FULFILL OUR WRITTEN GUIDELINES WRITTEN INSIDE THIS DOCUMENT**. So please check this guidelines before you propose a Issue/Pull Request.

## Topics

@@ -27,14 +27,18 @@ However, please note the following regarding pull requests:
6. Please **ALWAYS** create the actual pull request against the staging branch and **NEVER** directly against the master branch. *If you forget to do this, our moobot will remind you to switch the branch to staging.*
7. Wait for a merge commit: It may happen that we do not accept your pull request immediately or sometimes not at all for various reasons. Please do not be disappointed if this is the case. We always endeavor to incorporate any meaningful changes from the community into the mailcow project.
8. If you are planning larger and therefore more complex pull requests, it would be advisable to first announce this in a separate issue and then start implementing it after the idea has been accepted in order to avoid unnecessary frustration and effort!
+9. If your PR requires a Docker image rebuild (changes to Dockerfiles or files in data/Dockerfiles/), update the image tag in docker-compose.yml. Use the base-image versioning (e.g. ghcr.io/mailcow/sogo:5.12.4 → :5.12.5 for version bumps; append a letter for patch fixes, e.g. :5.12.4a). Follow this scheme.

---

## Issue Reporting
-**_Last modified on 15th August 2024_**
+**_Last modified on 12th November 2025_**

If you plan to report a issue within mailcow please read and understand the following rules:

+### Security disclosures / Security-related fixes
+- Security vulnerabilities and security fixes must always be reported confidentially first to the contact address specified in SECURITY.md before they are integrated, published, or publicly disclosed in issues/PRs. Please wait for a response from the specified contact to ensure coordinated and responsible disclosure.

### Issue Reporting Guidelines

1. **ONLY** use the issue tracker for bug reports or improvement requests and NOT for support questions. For support questions you can either contact the [mailcow community on Telegram](https://docs.mailcow.email/#community-support-and-chat) or the mailcow team directly in exchange for a [support fee](https://docs.mailcow.email/#commercial-support).
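To make new guideline 9 above concrete, a hypothetical illustration of a tag bump in docker-compose.yml after editing data/Dockerfiles/sogo/ (the tag values are the ones the guideline itself uses as examples):

# Locate the pinned tag for the image you rebuilt ...
grep -n 'image: ghcr.io/mailcow/sogo' docker-compose.yml
# ... and bump it according to the scheme (5.12.4 -> 5.12.5 for a version
# bump, or 5.12.4 -> 5.12.4a for a patch fix):
sed -i 's|ghcr.io/mailcow/sogo:5.12.4|ghcr.io/mailcow/sogo:5.12.5|' docker-compose.yml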
_modules/scripts/core.sh (new file, 230 lines)
@@ -0,0 +1,230 @@
#!/usr/bin/env bash
# _modules/scripts/core.sh
# THIS SCRIPT IS DESIGNED TO BE RUNNING BY MAILCOW SCRIPTS ONLY!
# DO NOT, AGAIN, NOT TRY TO RUN THIS SCRIPT STANDALONE!!!!!!

# ANSI color for red errors
RED='\e[31m'
GREEN='\e[32m'
YELLOW='\e[33m'
BLUE='\e[34m'
MAGENTA='\e[35m'
LIGHT_RED='\e[91m'
LIGHT_GREEN='\e[92m'
NC='\e[0m'

caller="${BASH_SOURCE[1]##*/}"

get_installed_tools(){
  for bin in openssl curl docker git awk sha1sum grep cut jq; do
    if [[ -z $(command -v ${bin}) ]]; then
      echo "Error: Cannot find command '${bin}'. Cannot proceed."
      echo "Solution: Please review system requirements and install requirements. Then, re-run the script."
      echo "See System Requirements: https://docs.mailcow.email/getstarted/install/"
      echo "Exiting..."
      exit 1
    fi
  done

  if grep --help 2>&1 | head -n 1 | grep -q -i "busybox"; then echo -e "${LIGHT_RED}BusyBox grep detected, please install gnu grep, \"apk add --no-cache --upgrade grep\"${NC}"; exit 1; fi
  # This will also cover sort
  if cp --help 2>&1 | head -n 1 | grep -q -i "busybox"; then echo -e "${LIGHT_RED}BusyBox cp detected, please install coreutils, \"apk add --no-cache --upgrade coreutils\"${NC}"; exit 1; fi
  if sed --help 2>&1 | head -n 1 | grep -q -i "busybox"; then echo -e "${LIGHT_RED}BusyBox sed detected, please install gnu sed, \"apk add --no-cache --upgrade sed\"${NC}"; exit 1; fi
}

get_docker_version(){
  # Check Docker Version (need at least 24.X)
  docker_version=$(docker version --format '{{.Server.Version}}' | cut -d '.' -f 1)
}

get_compose_type(){
  if docker compose > /dev/null 2>&1; then
    if docker compose version --short | grep -e "^[2-9]\." -e "^v[2-9]\." -e "^[1-9][0-9]\." -e "^v[1-9][0-9]\." > /dev/null 2>&1; then
      COMPOSE_VERSION=native
      COMPOSE_COMMAND="docker compose"
      if [[ "$caller" == "update.sh" ]]; then
        sed -i 's/^DOCKER_COMPOSE_VERSION=.*/DOCKER_COMPOSE_VERSION=native/' "$SCRIPT_DIR/mailcow.conf"
      fi
      echo -e "\e[33mFound Docker Compose Plugin (native).\e[0m"
      echo -e "\e[33mSetting the DOCKER_COMPOSE_VERSION Variable to native\e[0m"
      sleep 2
      echo -e "\e[33mNotice: You'll have to update this Compose Version via your Package Manager manually!\e[0m"
    else
      echo -e "\e[31mCannot find Docker Compose with a Version Higher than 2.X.X.\e[0m"
      echo -e "\e[31mPlease update/install it manually regarding to this doc site: https://docs.mailcow.email/install/\e[0m"
      exit 1
    fi
  elif docker-compose > /dev/null 2>&1; then
    if ! [[ $(alias docker-compose 2> /dev/null) ]] ; then
      if docker-compose version --short | grep -e "^[2-9]\." -e "^[1-9][0-9]\." > /dev/null 2>&1; then
        COMPOSE_VERSION=standalone
        COMPOSE_COMMAND="docker-compose"
        if [[ "$caller" == "update.sh" ]]; then
          sed -i 's/^DOCKER_COMPOSE_VERSION=.*/DOCKER_COMPOSE_VERSION=standalone/' "$SCRIPT_DIR/mailcow.conf"
        fi
        echo -e "\e[33mFound Docker Compose Standalone.\e[0m"
        echo -e "\e[33mSetting the DOCKER_COMPOSE_VERSION Variable to standalone\e[0m"
        sleep 2
        echo -e "\e[33mNotice: For an automatic update of docker-compose please use the update_compose.sh scripts located at the helper-scripts folder.\e[0m"
      else
        echo -e "\e[31mCannot find Docker Compose with a Version Higher than 2.X.X.\e[0m"
        echo -e "\e[31mPlease update/install manually regarding to this doc site: https://docs.mailcow.email/install/\e[0m"
        exit 1
      fi
    fi
  else
    echo -e "\e[31mCannot find Docker Compose.\e[0m"
    echo -e "\e[31mPlease install it regarding to this doc site: https://docs.mailcow.email/install/\e[0m"
    exit 1
  fi
}

detect_bad_asn() {
  echo -e "\e[33mDetecting if your IP is listed on Spamhaus Bad ASN List...\e[0m"
  response=$(curl --connect-timeout 15 --max-time 30 -s -o /dev/null -w "%{http_code}" "https://asn-check.mailcow.email")
  if [ "$response" -eq 503 ]; then
    if [ -z "$SPAMHAUS_DQS_KEY" ]; then
      echo -e "\e[33mYour server's public IP uses an AS that is blocked by Spamhaus to use their DNS public blocklists for Postfix.\e[0m"
      echo -e "\e[33mmailcow did not detected a value for the variable SPAMHAUS_DQS_KEY inside mailcow.conf!\e[0m"
      sleep 2
      echo ""
      echo -e "\e[33mTo use the Spamhaus DNS Blocklists again, you will need to create a FREE account for their Data Query Service (DQS) at: https://www.spamhaus.com/free-trial/sign-up-for-a-free-data-query-service-account\e[0m"
      echo -e "\e[33mOnce done, enter your DQS API key in mailcow.conf and mailcow will do the rest for you!\e[0m"
      echo ""
      sleep 2
    else
      echo -e "\e[33mYour server's public IP uses an AS that is blocked by Spamhaus to use their DNS public blocklists for Postfix.\e[0m"
      echo -e "\e[32mmailcow detected a Value for the variable SPAMHAUS_DQS_KEY inside mailcow.conf. Postfix will use DQS with the given API key...\e[0m"
    fi
  elif [ "$response" -eq 200 ]; then
    echo -e "\e[33mCheck completed! Your IP is \e[32mclean\e[0m"
  elif [ "$response" -eq 429 ]; then
    echo -e "\e[33mCheck completed! \e[31mYour IP seems to be rate limited on the ASN Check service... please try again later!\e[0m"
  else
    echo -e "\e[31mCheck failed! \e[0mMaybe a DNS or Network problem?\e[0m"
  fi
}

check_online_status() {
  CHECK_ONLINE_DOMAINS=('https://github.com' 'https://hub.docker.com')
  for domain in "${CHECK_ONLINE_DOMAINS[@]}"; do
    if timeout 6 curl --head --silent --output /dev/null ${domain}; then
      return 0
    fi
  done
  return 1
}

prefetch_images() {
  [[ -z ${BRANCH} ]] && { echo -e "\e[33m\nUnknown branch...\e[0m"; exit 1; }
  git fetch origin #${BRANCH}
  while read image; do
    RET_C=0
    until docker pull "${image}"; do
      RET_C=$((RET_C + 1))
      echo -e "\e[33m\nError pulling $image, retrying...\e[0m"
      [ ${RET_C} -gt 3 ] && { echo -e "\e[31m\nToo many failed retries, exiting\e[0m"; exit 1; }
      sleep 1
    done
  done < <(git show "origin/${BRANCH}:docker-compose.yml" | grep "image:" | awk '{ gsub("image:","", $3); print $2 }')
}

docker_garbage() {
  SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../.." && pwd )"
  IMGS_TO_DELETE=()

  declare -A IMAGES_INFO
  COMPOSE_IMAGES=($(grep -oP "image: \K(ghcr\.io/)?mailcow.+" "${SCRIPT_DIR}/docker-compose.yml"))

  for existing_image in $(docker images --format "{{.ID}}:{{.Repository}}:{{.Tag}}" | grep -E '(mailcow/|ghcr\.io/mailcow/)'); do
    ID=$(echo "$existing_image" | cut -d ':' -f 1)
    REPOSITORY=$(echo "$existing_image" | cut -d ':' -f 2)
    TAG=$(echo "$existing_image" | cut -d ':' -f 3)

    if [[ "$REPOSITORY" == "mailcow/backup" || "$REPOSITORY" == "ghcr.io/mailcow/backup" ]]; then
      if [[ "$TAG" != "<none>" ]]; then
        continue
      fi
    fi

    if [[ " ${COMPOSE_IMAGES[@]} " =~ " ${REPOSITORY}:${TAG} " ]]; then
      continue
    else
      IMGS_TO_DELETE+=("$ID")
      IMAGES_INFO["$ID"]="$REPOSITORY:$TAG"
    fi
  done

  if [[ ! -z ${IMGS_TO_DELETE[*]} ]]; then
    echo "The following unused mailcow images were found:"
    for id in "${IMGS_TO_DELETE[@]}"; do
      echo "  ${IMAGES_INFO[$id]} ($id)"
    done

    if [ -z "$FORCE" ]; then
      read -r -p "Do you want to delete them to free up some space? [y/N] " response
      if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
        docker rmi ${IMGS_TO_DELETE[*]}
      else
        echo "OK, skipped."
      fi
    else
      echo "Running in forced mode! Force removing old mailcow images..."
      docker rmi ${IMGS_TO_DELETE[*]}
    fi
    echo -e "\e[32mFurther cleanup...\e[0m"
    echo "If you want to cleanup further garbage collected by Docker, please make sure all containers are up and running before cleaning your system by executing \"docker system prune\""
  fi
}

in_array() {
  local e match="$1"
  shift
  for e; do [[ "$e" == "$match" ]] && return 0; done
  return 1
}

detect_major_update() {
  if [ ${BRANCH} == "master" ]; then
    # Array with major versions
    # Add major versions here
    MAJOR_VERSIONS=(
      "2025-02"
      "2025-03"
      "2025-09"
    )

    current_version=""
    if [[ -f "${SCRIPT_DIR}/data/web/inc/app_info.inc.php" ]]; then
      current_version=$(grep 'MAILCOW_GIT_VERSION' ${SCRIPT_DIR}/data/web/inc/app_info.inc.php | sed -E 's/.*MAILCOW_GIT_VERSION="([^"]+)".*/\1/')
    fi
    if [[ -z "$current_version" ]]; then
      return 1
    fi
    release_url="https://github.com/mailcow/mailcow-dockerized/releases/tag"

    updates_to_apply=()

    for version in "${MAJOR_VERSIONS[@]}"; do
      if [[ "$current_version" < "$version" ]]; then
        updates_to_apply+=("$version")
      fi
    done

    if [[ ${#updates_to_apply[@]} -gt 0 ]]; then
      echo -e "\e[33m\nMAJOR UPDATES to be applied:\e[0m"
      for update in "${updates_to_apply[@]}"; do
        echo "$update - $release_url/$update"
      done

      echo -e "\nPlease read the release notes before proceeding."
      read -p "Do you want to proceed with the update? [y/n] " response
      if [[ "${response}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
        echo "Proceeding with the update..."
      else
        echo "Update canceled. Exiting."
        exit 1
      fi
    fi
  fi
}
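A minimal caller sketch for the module above (hypothetical, not part of the diff): core.sh derives its caller's name from BASH_SOURCE[1] and expects SCRIPT_DIR to point at the mailcow root, so an entry script such as update.sh would source it roughly like this:

#!/usr/bin/env bash
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/_modules/scripts/core.sh"

get_installed_tools   # exits if a required binary is missing or BusyBox tools are detected
get_compose_type      # sets COMPOSE_VERSION and COMPOSE_COMMAND
check_online_status || { echo "No connection to github.com or hub.docker.com."; exit 1; }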
_modules/scripts/ipv6_controller.sh (new file, 239 lines)
@@ -0,0 +1,239 @@
#!/usr/bin/env bash
# _modules/scripts/ipv6_controller.sh
# THIS SCRIPT IS DESIGNED TO BE RUNNING BY MAILCOW SCRIPTS ONLY!
# DO NOT, AGAIN, NOT TRY TO RUN THIS SCRIPT STANDALONE!!!!!!

# 1) Check if the host supports IPv6
get_ipv6_support() {
  # ---- helper: probe external IPv6 connectivity without DNS ----
  _probe_ipv6_connectivity() {
    # Use literal, always-on IPv6 echo responders (no DNS required)
    local PROBE_IPS=("2001:4860:4860::8888" "2606:4700:4700::1111")
    local ip rc=1

    for ip in "${PROBE_IPS[@]}"; do
      if command -v ping6 &>/dev/null; then
        ping6 -c1 -W2 "$ip" &>/dev/null || ping6 -c1 -w2 "$ip" &>/dev/null
        rc=$?
      elif command -v ping &>/dev/null; then
        ping -6 -c1 -W2 "$ip" &>/dev/null || ping -6 -c1 -w2 "$ip" &>/dev/null
        rc=$?
      else
        rc=1
      fi
      [[ $rc -eq 0 ]] && return 0
    done
    return 1
  }

  if [[ ! -f /proc/net/if_inet6 ]] || grep -qs '^1' /proc/sys/net/ipv6/conf/all/disable_ipv6 2>/dev/null; then
    DETECTED_IPV6=false
    echo -e "${YELLOW}IPv6 not detected on host – ${LIGHT_RED}IPv6 is administratively disabled${YELLOW}.${NC}"
    return
  fi

  if ip -6 route show default 2>/dev/null | grep -qE '^default'; then
    echo -e "${YELLOW}Default IPv6 route found – testing external IPv6 connectivity...${NC}"
    if _probe_ipv6_connectivity; then
      DETECTED_IPV6=true
      echo -e "IPv6 detected on host – ${LIGHT_GREEN}leaving IPv6 support enabled${YELLOW}.${NC}"
    else
      DETECTED_IPV6=false
      echo -e "${YELLOW}Default IPv6 route present but external IPv6 connectivity failed – ${LIGHT_RED}disabling IPv6 support${YELLOW}.${NC}"
    fi
    return
  fi

  if ip -6 addr show scope global 2>/dev/null | grep -q 'inet6'; then
    DETECTED_IPV6=false
    echo -e "${YELLOW}Global IPv6 address present but no default route – ${LIGHT_RED}disabling IPv6 support${YELLOW}.${NC}"
    return
  fi

  if ip -6 addr show scope link 2>/dev/null | grep -q 'inet6'; then
    echo -e "${YELLOW}Only link-local IPv6 addresses found – testing external IPv6 connectivity...${NC}"
    if _probe_ipv6_connectivity; then
      DETECTED_IPV6=true
      echo -e "External IPv6 connectivity available – ${LIGHT_GREEN}leaving IPv6 support enabled${YELLOW}.${NC}"
    else
      DETECTED_IPV6=false
      echo -e "${YELLOW}Only link-local IPv6 present and no external connectivity – ${LIGHT_RED}disabling IPv6 support${YELLOW}.${NC}"
    fi
    return
  fi

  DETECTED_IPV6=false
  echo -e "${YELLOW}IPv6 not detected on host – ${LIGHT_RED}disabling IPv6 support${YELLOW}.${NC}"
}

# 2) Ensure Docker daemon.json has (or create) the required IPv6 settings
docker_daemon_edit(){
  DOCKER_DAEMON_CONFIG="/etc/docker/daemon.json"
  DOCKER_MAJOR=$(docker version --format '{{.Server.Version}}' 2>/dev/null | cut -d. -f1)
  MISSING=()

  _has_kv() { grep -Eq "\"$1\"[[:space:]]*:[[:space:]]*$2" "$DOCKER_DAEMON_CONFIG" 2>/dev/null; }

  if [[ -f "$DOCKER_DAEMON_CONFIG" ]]; then

    # reject empty or whitespace-only file immediately
    if [[ ! -s "$DOCKER_DAEMON_CONFIG" ]] || ! grep -Eq '[{}]' "$DOCKER_DAEMON_CONFIG"; then
      echo -e "${RED}ERROR: $DOCKER_DAEMON_CONFIG exists but is empty or contains no JSON braces – please initialize it with valid JSON (e.g. {}).${NC}"
      exit 1
    fi

    # Validate JSON if jq is present
    if command -v jq &>/dev/null && ! jq empty "$DOCKER_DAEMON_CONFIG" &>/dev/null; then
      echo -e "${RED}ERROR: Invalid JSON in $DOCKER_DAEMON_CONFIG – please correct manually.${NC}"
      exit 1
    fi

    # Gather missing keys
    ! _has_kv ipv6 true && MISSING+=("ipv6: true")

    # For Docker < 28, keep requiring fixed-cidr-v6 (default bridge needs it on old engines)
    if [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 28 ]]; then
      ! grep -Eq '"fixed-cidr-v6"[[:space:]]*:[[:space:]]*".+"' "$DOCKER_DAEMON_CONFIG" \
        && MISSING+=('fixed-cidr-v6: "fd00:dead:beef:c0::/80"')
    fi

    # For Docker < 27, ip6tables needed and was tied to experimental in older releases
    if [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 27 ]]; then
      _has_kv ipv6 true && ! _has_kv ip6tables true && MISSING+=("ip6tables: true")
      ! _has_kv experimental true && MISSING+=("experimental: true")
    fi

    # Fix if needed
    if ((${#MISSING[@]}>0)); then
      echo -e "${MAGENTA}Your daemon.json is missing: ${YELLOW}${MISSING[*]}${NC}"
      if [[ -n "$FORCE" ]]; then
        ans=Y
      else
        read -p "Would you like to update $DOCKER_DAEMON_CONFIG now? [Y/n] " ans
        ans=${ans:-Y}
      fi

      if [[ $ans =~ ^[Yy]$ ]]; then
        cp "$DOCKER_DAEMON_CONFIG" "${DOCKER_DAEMON_CONFIG}.bak"
        if command -v jq &>/dev/null; then
          TMP=$(mktemp)
          # Base filter: ensure ipv6 = true
          JQ_FILTER='.ipv6 = true'

          # Add fixed-cidr-v6 only for Docker < 28
          if [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 28 ]]; then
            JQ_FILTER+=' | .["fixed-cidr-v6"] = (.["fixed-cidr-v6"] // "fd00:dead:beef:c0::/80")'
          fi

          # Add ip6tables/experimental only for Docker < 27
          if [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 27 ]]; then
            JQ_FILTER+=' | .ip6tables = true | .experimental = true'
          fi

          jq "$JQ_FILTER" "$DOCKER_DAEMON_CONFIG" >"$TMP" && mv "$TMP" "$DOCKER_DAEMON_CONFIG"
          echo -e "${LIGHT_GREEN}daemon.json updated. Restarting Docker...${NC}"
          (command -v systemctl &>/dev/null && systemctl restart docker) || service docker restart
          echo -e "${YELLOW}Docker restarted.${NC}"
        else
          echo -e "${RED}Please install jq or manually update daemon.json and restart Docker.${NC}"
          exit 1
        fi
      else
        echo -e "${YELLOW}User declined Docker update – please insert these changes manually:${NC}"
        echo "${MISSING[*]}"
        exit 1
      fi
    fi

  else
    # Create new daemon.json if missing
    if [[ -n "$FORCE" ]]; then
      ans=Y
    else
      read -p "$DOCKER_DAEMON_CONFIG not found. Create it with IPv6 settings? [Y/n] " ans
      ans=${ans:-Y}
    fi

    if [[ $ans =~ ^[Yy]$ ]]; then
      mkdir -p "$(dirname "$DOCKER_DAEMON_CONFIG")"
      if [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 27 ]]; then
        cat > "$DOCKER_DAEMON_CONFIG" <<EOF
{
  "ipv6": true,
  "fixed-cidr-v6": "fd00:dead:beef:c0::/80",
  "ip6tables": true,
  "experimental": true
}
EOF
      elif [[ -n "$DOCKER_MAJOR" && "$DOCKER_MAJOR" -lt 28 ]]; then
        cat > "$DOCKER_DAEMON_CONFIG" <<EOF
{
  "ipv6": true,
  "fixed-cidr-v6": "fd00:dead:beef:c0::/80"
}
EOF
      else
        # Docker 28+: ipv6 works without fixed-cidr-v6
        cat > "$DOCKER_DAEMON_CONFIG" <<EOF
{
  "ipv6": true
}
EOF
      fi
      echo -e "${GREEN}Created $DOCKER_DAEMON_CONFIG with IPv6 settings.${NC}"
      echo "Restarting Docker..."
      (command -v systemctl &>/dev/null && systemctl restart docker) || service docker restart
      echo "Docker restarted."
    else
      echo "User declined to create daemon.json – please manually merge the docker daemon with these configs:"
      echo "${MISSING[*]}"
      exit 1
    fi
  fi
}

# 3) Main wrapper for generate_config.sh and update.sh
configure_ipv6() {
  # detect manual override if mailcow.conf is present
  if [[ -n "$MAILCOW_CONF" && -f "$MAILCOW_CONF" ]] && grep -q '^ENABLE_IPV6=' "$MAILCOW_CONF"; then
    MANUAL_SETTING=$(grep '^ENABLE_IPV6=' "$MAILCOW_CONF" | cut -d= -f2)
  elif [[ -z "$MAILCOW_CONF" ]] && [[ -n "${ENABLE_IPV6:-}" ]]; then
    MANUAL_SETTING="$ENABLE_IPV6"
  else
    MANUAL_SETTING=""
  fi

  get_ipv6_support

  # if user manually set it, check for mismatch
  if [[ "$DETECTED_IPV6" != "true" ]]; then
    if [[ -n "$MAILCOW_CONF" && -f "$MAILCOW_CONF" ]]; then
      if grep -q '^ENABLE_IPV6=' "$MAILCOW_CONF"; then
        sed -i 's/^ENABLE_IPV6=.*/ENABLE_IPV6=false/' "$MAILCOW_CONF"
      else
        echo "ENABLE_IPV6=false" >> "$MAILCOW_CONF"
      fi
    else
      export IPV6_BOOL=false
    fi
    echo "Skipping Docker IPv6 configuration because host does not support IPv6."
    echo "Make sure to check if your docker daemon.json does not include \"enable_ipv6\": true if you do not want IPv6."
    echo "IPv6 configuration complete: ENABLE_IPV6=false"
    sleep 2
    return
  fi

  docker_daemon_edit

  if [[ -n "$MAILCOW_CONF" && -f "$MAILCOW_CONF" ]]; then
    if grep -q '^ENABLE_IPV6=' "$MAILCOW_CONF"; then
      sed -i 's/^ENABLE_IPV6=.*/ENABLE_IPV6=true/' "$MAILCOW_CONF"
    else
      echo "ENABLE_IPV6=true" >> "$MAILCOW_CONF"
    fi
  else
    export IPV6_BOOL=true
  fi

  echo "IPv6 configuration complete: ENABLE_IPV6=true"
}
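A minimal usage sketch (hypothetical, not part of the diff): the wrapper above is written for generate_config.sh and update.sh, which would set MAILCOW_CONF before delegating; the color variables it prints come from core.sh:

source "${SCRIPT_DIR}/_modules/scripts/core.sh"              # provides RED/YELLOW/... used here
source "${SCRIPT_DIR}/_modules/scripts/ipv6_controller.sh"
MAILCOW_CONF="${SCRIPT_DIR}/mailcow.conf"
configure_ipv6   # probes host IPv6, edits /etc/docker/daemon.json, writes ENABLE_IPV6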
_modules/scripts/migrate_options.sh (new file, 96 lines)
@@ -0,0 +1,96 @@
#!/usr/bin/env bash
# _modules/scripts/migrate_options.sh
# THIS SCRIPT IS DESIGNED TO BE RUNNING BY MAILCOW SCRIPTS ONLY!
# DO NOT, AGAIN, NOT TRY TO RUN THIS SCRIPT STANDALONE!!!!!!

migrate_config_options() {

  sed -i --follow-symlinks '$a\' mailcow.conf

  KEYS=(
    SOLR_HEAP
    SKIP_SOLR
    SOLR_PORT
    FLATCURVE_EXPERIMENTAL
    DISABLE_IPv6
    ACME_CONTACT
  )

  for key in "${KEYS[@]}"; do
    if grep -q "${key}" mailcow.conf; then
      case "${key}" in
        SOLR_HEAP)
          echo "Removing ${key} in mailcow.conf"
          sed -i '/# Solr heap size in MB\b/d' mailcow.conf
          sed -i '/# Solr is a prone to run\b/d' mailcow.conf
          sed -i '/SOLR_HEAP\b/d' mailcow.conf
          ;;
        SKIP_SOLR)
          echo "Removing ${key} in mailcow.conf"
          sed -i '/\bSkip Solr on low-memory\b/d' mailcow.conf
          sed -i '/\bSolr is disabled by default\b/d' mailcow.conf
          sed -i '/\bDisable Solr or\b/d' mailcow.conf
          sed -i '/\bSKIP_SOLR\b/d' mailcow.conf
          ;;
        SOLR_PORT)
          echo "Removing ${key} in mailcow.conf"
          sed -i '/\bSOLR_PORT\b/d' mailcow.conf
          ;;
        FLATCURVE_EXPERIMENTAL)
          echo "Removing ${key} in mailcow.conf"
          sed -i '/\bFLATCURVE_EXPERIMENTAL\b/d' mailcow.conf
          ;;
        DISABLE_IPv6)
          echo "Migrating ${key} to ENABLE_IPv6 in mailcow.conf"
          local old=$(grep '^DISABLE_IPv6=' "mailcow.conf" | cut -d'=' -f2)
          local new
          if [[ "$old" == "y" ]]; then
            new="false"
          else
            new="true"
          fi
          sed -i '/^DISABLE_IPv6=/d' "mailcow.conf"
          echo "ENABLE_IPV6=$new" >> "mailcow.conf"
          ;;
        ACME_CONTACT)
          echo "Deleting obsoleted ${key} in mailcow.conf"
          sed -i '/^# Lets Encrypt registration contact information/d' mailcow.conf
          sed -i '/^# Optional: Leave empty for none/d' mailcow.conf
          sed -i '/^# This value is only used on first order!/d' mailcow.conf
          sed -i '/^# Setting it at a later point will require the following steps:/d' mailcow.conf
          sed -i '/^# https:\/\/docs.mailcow.email\/troubleshooting\/debug-reset_tls\//d' mailcow.conf
          sed -i '/^ACME_CONTACT=.*/d' mailcow.conf
          sed -i '/^#ACME_CONTACT=.*/d' mailcow.conf
          ;;
      esac
    fi
  done

  solr_volume=$(docker volume ls -qf name=^${COMPOSE_PROJECT_NAME}_solr-vol-1)
  if [[ -n $solr_volume ]]; then
    echo -e "\e[34mSolr has been replaced within mailcow since 2025-01.\nThe volume $solr_volume is unused.\e[0m"
    sleep 1
    if [ ! "$FORCE" ]; then
      read -r -p "Remove $solr_volume? [y/N] " response
      if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
        echo -e "\e[33mRemoving $solr_volume...\e[0m"
        docker volume rm $solr_volume || echo -e "\e[31mFailed to remove. Remove it manually!\e[0m"
        echo -e "\e[32mSuccessfully removed $solr_volume!\e[0m"
      else
        echo -e "Not removing $solr_volume. Run \`docker volume rm $solr_volume\` manually if needed."
      fi
    else
      echo -e "\e[33mForce removing $solr_volume...\e[0m"
      docker volume rm $solr_volume || echo -e "\e[31mFailed to remove. Remove it manually!\e[0m"
      echo -e "\e[32mSuccessfully removed $solr_volume!\e[0m"
    fi
  fi

  # Delete old fts.conf before forced switch to flatcurve to ensure update is working properly
  FTS_CONF_PATH="${SCRIPT_DIR}/data/conf/dovecot/conf.d/fts.conf"
  if [[ -f "$FTS_CONF_PATH" ]]; then
    if grep -q "Autogenerated by mailcow" "$FTS_CONF_PATH"; then
      rm -rf $FTS_CONF_PATH
    fi
  fi
}
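A worked example of the DISABLE_IPv6 migration above (illustration only, not part of the diff):

# mailcow.conf before:  DISABLE_IPv6=y   ->  after:  ENABLE_IPV6=false
# mailcow.conf before:  DISABLE_IPv6=n   ->  after:  ENABLE_IPV6=true
# Note the inversion: any value other than "y" means IPv6 was not disabled,
# so it migrates to ENABLE_IPV6=true.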
_modules/scripts/new_options.sh (new file, 300 lines)
@@ -0,0 +1,300 @@
|
||||
#!/usr/bin/env bash
|
||||
# _modules/scripts/new_options.sh
|
||||
# THIS SCRIPT IS DESIGNED TO BE RUNNING BY MAILCOW SCRIPTS ONLY!
|
||||
# DO NOT, AGAIN, NOT TRY TO RUN THIS SCRIPT STANDALONE!!!!!!
|
||||
|
||||
adapt_new_options() {
|
||||
|
||||
CONFIG_ARRAY=(
|
||||
"AUTODISCOVER_SAN"
|
||||
"SKIP_LETS_ENCRYPT"
|
||||
"SKIP_SOGO"
|
||||
"USE_WATCHDOG"
|
||||
"WATCHDOG_NOTIFY_EMAIL"
|
||||
"WATCHDOG_NOTIFY_WEBHOOK"
|
||||
"WATCHDOG_NOTIFY_WEBHOOK_BODY"
|
||||
"WATCHDOG_NOTIFY_BAN"
|
||||
"WATCHDOG_NOTIFY_START"
|
||||
"WATCHDOG_EXTERNAL_CHECKS"
|
||||
"WATCHDOG_SUBJECT"
|
||||
"SKIP_CLAMD"
|
||||
"SKIP_OLEFY"
|
||||
"SKIP_IP_CHECK"
|
||||
"ADDITIONAL_SAN"
|
||||
"DOVEADM_PORT"
|
||||
"IPV4_NETWORK"
|
||||
"IPV6_NETWORK"
|
||||
"LOG_LINES"
|
||||
"SNAT_TO_SOURCE"
|
||||
"SNAT6_TO_SOURCE"
|
||||
"COMPOSE_PROJECT_NAME"
|
||||
"DOCKER_COMPOSE_VERSION"
|
||||
"SQL_PORT"
|
||||
"API_KEY"
|
||||
"API_KEY_READ_ONLY"
|
||||
"API_ALLOW_FROM"
|
||||
"MAILDIR_GC_TIME"
|
||||
"MAILDIR_SUB"
|
||||
"ACL_ANYONE"
|
||||
"FTS_HEAP"
|
||||
"FTS_PROCS"
|
||||
"SKIP_FTS"
|
||||
"ENABLE_SSL_SNI"
|
||||
"ALLOW_ADMIN_EMAIL_LOGIN"
|
||||
"SKIP_HTTP_VERIFICATION"
|
||||
"SOGO_EXPIRE_SESSION"
|
||||
"SOGO_URL_ENCRYPTION_KEY"
|
||||
"REDIS_PORT"
|
||||
"REDISPASS"
|
||||
"DOVECOT_MASTER_USER"
|
||||
"DOVECOT_MASTER_PASS"
|
||||
"MAILCOW_PASS_SCHEME"
|
||||
"ADDITIONAL_SERVER_NAMES"
|
||||
"WATCHDOG_VERBOSE"
|
||||
"WEBAUTHN_ONLY_TRUSTED_VENDORS"
|
||||
"SPAMHAUS_DQS_KEY"
|
||||
"SKIP_UNBOUND_HEALTHCHECK"
|
||||
"DISABLE_NETFILTER_ISOLATION_RULE"
|
||||
"HTTP_REDIRECT"
|
||||
"ENABLE_IPV6"
|
||||
)
|
||||
|
||||
sed -i --follow-symlinks '$a\' mailcow.conf
|
||||
for option in ${CONFIG_ARRAY[@]}; do
|
||||
if grep -q "${option}" mailcow.conf; then
|
||||
continue
|
||||
fi
|
||||
|
||||
echo "Adding new option \"${option}\" to mailcow.conf"
|
||||
|
||||
case "${option}" in
|
||||
AUTODISCOVER_SAN)
|
||||
echo '# Obtain certificates for autodiscover.* and autoconfig.* domains.' >> mailcow.conf
|
||||
echo '# This can be useful to switch off in case you are in a scenario where a reverse proxy already handles those.' >> mailcow.conf
|
||||
echo '# There are mixed scenarios where ports 80,443 are occupied and you do not want to share certs' >> mailcow.conf
|
||||
echo '# between services. So acme-mailcow obtains for maildomains and all web-things get handled' >> mailcow.conf
|
||||
echo '# in the reverse proxy.' >> mailcow.conf
|
||||
echo 'AUTODISCOVER_SAN=y' >> mailcow.conf
|
||||
;;
|
||||
|
||||
DOCKER_COMPOSE_VERSION)
|
||||
echo "# Used Docker Compose version" >> mailcow.conf
|
||||
echo "# Switch here between native (compose plugin) and standalone" >> mailcow.conf
|
||||
echo "# For more informations take a look at the mailcow docs regarding the configuration options." >> mailcow.conf
|
||||
echo "# Normally this should be untouched but if you decided to use either of those you can switch it manually here." >> mailcow.conf
|
||||
echo "# Please be aware that at least one of those variants should be installed on your machine or mailcow will fail." >> mailcow.conf
|
||||
echo "" >> mailcow.conf
|
||||
echo "DOCKER_COMPOSE_VERSION=${DOCKER_COMPOSE_VERSION}" >> mailcow.conf
|
||||
;;
|
||||
|
||||
DOVEADM_PORT)
|
||||
echo "DOVEADM_PORT=127.0.0.1:19991" >> mailcow.conf
|
||||
;;
|
||||
|
||||
LOG_LINES)
|
||||
echo '# Max log lines per service to keep in Redis logs' >> mailcow.conf
|
||||
echo "LOG_LINES=9999" >> mailcow.conf
|
||||
;;
|
||||
IPV4_NETWORK)
|
||||
echo '# Internal IPv4 /24 subnet, format n.n.n. (expands to n.n.n.0/24)' >> mailcow.conf
|
||||
echo "IPV4_NETWORK=172.22.1" >> mailcow.conf
|
||||
;;
|
||||
IPV6_NETWORK)
|
||||
echo '# Internal IPv6 subnet in fc00::/7' >> mailcow.conf
|
||||
echo "IPV6_NETWORK=fd4d:6169:6c63:6f77::/64" >> mailcow.conf
|
||||
;;
|
||||
SQL_PORT)
|
||||
echo '# Bind SQL to 127.0.0.1 on port 13306' >> mailcow.conf
|
||||
echo "SQL_PORT=127.0.0.1:13306" >> mailcow.conf
|
||||
;;
|
||||
API_KEY)
|
||||
echo '# Create or override API key for web UI' >> mailcow.conf
|
||||
echo "#API_KEY=" >> mailcow.conf
|
||||
;;
|
||||
API_KEY_READ_ONLY)
|
||||
echo '# Create or override read-only API key for web UI' >> mailcow.conf
|
||||
echo "#API_KEY_READ_ONLY=" >> mailcow.conf
|
||||
;;
|
||||
API_ALLOW_FROM)
|
||||
echo '# Must be set for API_KEY to be active' >> mailcow.conf
|
||||
echo '# IPs only, no networks (networks can be set via UI)' >> mailcow.conf
|
||||
echo "#API_ALLOW_FROM=" >> mailcow.conf
|
||||
;;
|
||||
SNAT_TO_SOURCE)
|
||||
echo '# Use this IPv4 for outgoing connections (SNAT)' >> mailcow.conf
|
||||
echo "#SNAT_TO_SOURCE=" >> mailcow.conf
|
||||
;;
|
||||
SNAT6_TO_SOURCE)
|
||||
echo '# Use this IPv6 for outgoing connections (SNAT)' >> mailcow.conf
|
||||
echo "#SNAT6_TO_SOURCE=" >> mailcow.conf
|
||||
;;
|
||||
MAILDIR_GC_TIME)
|
||||
echo '# Garbage collector cleanup' >> mailcow.conf
|
||||
echo '# Deleted domains and mailboxes are moved to /var/vmail/_garbage/timestamp_sanitizedstring' >> mailcow.conf
|
||||
echo '# How long should objects remain in the garbage until they are being deleted? (value in minutes)' >> mailcow.conf
|
||||
echo '# Check interval is hourly' >> mailcow.conf
|
||||
echo 'MAILDIR_GC_TIME=1440' >> mailcow.conf
|
||||
;;
|
||||
ACL_ANYONE)
|
||||
echo '# Set this to "allow" to enable the anyone pseudo user. Disabled by default.' >> mailcow.conf
|
||||
echo '# When enabled, ACL can be created, that apply to "All authenticated users"' >> mailcow.conf
|
||||
echo '# This should probably only be activated on mail hosts, that are used exclusively by one organisation.' >> mailcow.conf
|
||||
echo '# Otherwise a user might share data with too many other users.' >> mailcow.conf
|
||||
echo 'ACL_ANYONE=disallow' >> mailcow.conf
|
||||
;;
|
||||
FTS_HEAP)
|
||||
echo '# Dovecot Indexing (FTS) Process maximum heap size in MB, there is no recommendation, please see Dovecot docs.' >> mailcow.conf
|
||||
echo '# Flatcurve is used as FTS Engine. It is supposed to be pretty efficient in CPU and RAM consumption.' >> mailcow.conf
|
||||
echo '# Please always monitor your Resource consumption!' >> mailcow.conf
|
||||
echo "FTS_HEAP=128" >> mailcow.conf
|
||||
;;
|
||||
SKIP_FTS)
|
||||
echo '# Skip FTS (Fulltext Search) for Dovecot on low-memory, low-threaded systems or if you simply want to disable it.' >> mailcow.conf
|
||||
echo "# Dovecot inside mailcow use Flatcurve as FTS Backend." >> mailcow.conf
|
||||
echo "SKIP_FTS=y" >> mailcow.conf
|
||||
;;
|
||||
FTS_PROCS)
|
||||
echo '# Controls how many processes the Dovecot indexing process can spawn at max.' >> mailcow.conf
|
||||
echo '# Too many indexing processes can use a lot of CPU and Disk I/O' >> mailcow.conf
|
||||
echo '# Please visit: https://doc.dovecot.org/configuration_manual/service_configuration/#indexer-worker for more informations' >> mailcow.conf
|
||||
echo "FTS_PROCS=1" >> mailcow.conf
|
||||
;;
|
||||
ENABLE_SSL_SNI)
|
||||
echo '# Create seperate certificates for all domains - y/n' >> mailcow.conf
|
||||
echo '# this will allow adding more than 100 domains, but some email clients will not be able to connect with alternative hostnames' >> mailcow.conf
|
||||
echo '# see https://wiki.dovecot.org/SSL/SNIClientSupport' >> mailcow.conf
|
||||
echo "ENABLE_SSL_SNI=n" >> mailcow.conf
|
||||
;;
|
||||
SKIP_SOGO)
|
||||
echo '# Skip SOGo: Will disable SOGo integration and therefore webmail, DAV protocols and ActiveSync support (experimental, unsupported, not fully implemented) - y/n' >> mailcow.conf
|
||||
echo "SKIP_SOGO=n" >> mailcow.conf
|
||||
;;
|
||||
MAILDIR_SUB)
|
||||
echo '# MAILDIR_SUB defines a path in a users virtual home to keep the maildir in. Leave empty for updated setups.' >> mailcow.conf
|
||||
echo "#MAILDIR_SUB=Maildir" >> mailcow.conf
|
||||
echo "MAILDIR_SUB=" >> mailcow.conf
|
||||
;;
|
||||
WATCHDOG_NOTIFY_WEBHOOK)
|
||||
echo '# Send notifications to a webhook URL that receives a POST request with the content type "application/json".' >> mailcow.conf
|
||||
echo '# You can use this to send notifications to services like Discord, Slack and others.' >> mailcow.conf
|
||||
echo '#WATCHDOG_NOTIFY_WEBHOOK=https://discord.com/api/webhooks/XXXXXXXXXXXXXXXXXXX/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' >> mailcow.conf
|
||||
;;
|
||||
WATCHDOG_NOTIFY_WEBHOOK_BODY)
|
||||
echo '# JSON body included in the webhook POST request. Needs to be in single quotes.' >> mailcow.conf
|
||||
echo '# Following variables are available: SUBJECT, BODY' >> mailcow.conf
|
||||
WEBHOOK_BODY='{"username": "mailcow Watchdog", "content": "**${SUBJECT}**\n${BODY}"}'
|
||||
echo "#WATCHDOG_NOTIFY_WEBHOOK_BODY='${WEBHOOK_BODY}'" >> mailcow.conf
|
||||
;;
|
||||
WATCHDOG_NOTIFY_BAN)
|
||||
echo '# Notify about banned IP. Includes whois lookup.' >> mailcow.conf
|
||||
echo "WATCHDOG_NOTIFY_BAN=y" >> mailcow.conf
|
||||
;;
|
||||
WATCHDOG_NOTIFY_START)
|
||||
echo '# Send a notification when the watchdog is started.' >> mailcow.conf
|
||||
echo "WATCHDOG_NOTIFY_START=y" >> mailcow.conf
|
||||
;;
|
||||
WATCHDOG_SUBJECT)
|
||||
echo '# Subject for watchdog mails. Defaults to "Watchdog ALERT" followed by the error message.' >> mailcow.conf
|
||||
echo "#WATCHDOG_SUBJECT=" >> mailcow.conf
|
||||
;;
|
||||
WATCHDOG_EXTERNAL_CHECKS)
|
||||
echo '# Checks if mailcow is an open relay. Requires a SAL. More checks will follow.' >> mailcow.conf
|
||||
echo '# No data is collected. Opt-in and anonymous.' >> mailcow.conf
|
||||
echo '# Will only work with unmodified mailcow setups.' >> mailcow.conf
|
||||
echo "WATCHDOG_EXTERNAL_CHECKS=n" >> mailcow.conf
|
||||
;;
|
||||
SOGO_EXPIRE_SESSION)
|
||||
echo '# SOGo session timeout in minutes' >> mailcow.conf
|
||||
echo "SOGO_EXPIRE_SESSION=480" >> mailcow.conf
|
||||
;;
|
||||
REDIS_PORT)
|
||||
echo "REDIS_PORT=127.0.0.1:7654" >> mailcow.conf
|
||||
;;
|
||||
DOVECOT_MASTER_USER)
|
||||
echo '# DOVECOT_MASTER_USER and _PASS must _both_ be provided. No special chars.' >> mailcow.conf
|
||||
echo '# Empty by default to auto-generate master user and password on start.' >> mailcow.conf
|
||||
echo '# User expands to DOVECOT_MASTER_USER@mailcow.local' >> mailcow.conf
|
||||
echo '# LEAVE EMPTY IF UNSURE' >> mailcow.conf
|
||||
echo "DOVECOT_MASTER_USER=" >> mailcow.conf
|
||||
;;
|
||||
DOVECOT_MASTER_PASS)
|
||||
echo '# LEAVE EMPTY IF UNSURE' >> mailcow.conf
|
||||
echo "DOVECOT_MASTER_PASS=" >> mailcow.conf
|
||||
;;
|
||||
MAILCOW_PASS_SCHEME)
|
||||
echo '# Password hash algorithm' >> mailcow.conf
|
||||
echo '# Only certain password hash algorithm are supported. For a fully list of supported schemes,' >> mailcow.conf
|
||||
echo '# see https://docs.mailcow.email/models/model-passwd/' >> mailcow.conf
|
||||
echo "MAILCOW_PASS_SCHEME=BLF-CRYPT" >> mailcow.conf
|
||||
;;
      ADDITIONAL_SERVER_NAMES)
        echo '# Additional server names for mailcow UI' >> mailcow.conf
        echo '#' >> mailcow.conf
        echo '# Specify alternative addresses for the mailcow UI to respond to' >> mailcow.conf
        echo '# This is useful when you set mail.* as ADDITIONAL_SAN and want to make sure mail.maildomain.com will always point to the mailcow UI.' >> mailcow.conf
        echo '# If the server name does not match a known site, Nginx decides by best-guess and may redirect users to the wrong web root.' >> mailcow.conf
        echo '# Think of this as the server_name directive in Nginx.' >> mailcow.conf
        echo '# Comma separated list without spaces! Example: ADDITIONAL_SERVER_NAMES=a.b.c,d.e.f' >> mailcow.conf
        echo 'ADDITIONAL_SERVER_NAMES=' >> mailcow.conf
        ;;
      WEBAUTHN_ONLY_TRUSTED_VENDORS)
        echo "# WebAuthn device manufacturer verification" >> mailcow.conf
        echo '# After setting WEBAUTHN_ONLY_TRUSTED_VENDORS=y only devices from trusted manufacturers are allowed' >> mailcow.conf
        echo '# Root certificates can be placed for validation under mailcow-dockerized/data/web/inc/lib/WebAuthn/rootCertificates' >> mailcow.conf
        echo 'WEBAUTHN_ONLY_TRUSTED_VENDORS=n' >> mailcow.conf
        ;;
      SPAMHAUS_DQS_KEY)
        echo "# Spamhaus Data Query Service Key" >> mailcow.conf
        echo '# Optional: Leave empty for none' >> mailcow.conf
        echo '# Enter your key here if you are using a blocked ASN (e.g. OVH, AWS, Cloudflare) for the unregistered Spamhaus blocklist.' >> mailcow.conf
        echo '# If empty, Spamhaus blocklists will be disabled completely if mailcow detects that your server uses a blocked ASN.' >> mailcow.conf
        echo '# Otherwise it will work as usual.' >> mailcow.conf
        echo 'SPAMHAUS_DQS_KEY=' >> mailcow.conf
        ;;
      WATCHDOG_VERBOSE)
        echo '# Enable watchdog verbose logging' >> mailcow.conf
        echo 'WATCHDOG_VERBOSE=n' >> mailcow.conf
        ;;
      SKIP_UNBOUND_HEALTHCHECK)
        echo '# Skip Unbound (DNS Resolver) healthchecks (NOT recommended!) - y/n' >> mailcow.conf
        echo 'SKIP_UNBOUND_HEALTHCHECK=n' >> mailcow.conf
        ;;
      DISABLE_NETFILTER_ISOLATION_RULE)
        echo '# Prevent netfilter from setting an iptables/nftables rule to isolate the mailcow docker network - y/n' >> mailcow.conf
        echo '# CAUTION: Disabling this may expose container ports to other neighbors on the same subnet, even if the ports are bound to localhost' >> mailcow.conf
        echo 'DISABLE_NETFILTER_ISOLATION_RULE=n' >> mailcow.conf
        ;;
      HTTP_REDIRECT)
        echo '# Redirect HTTP connections to HTTPS - y/n' >> mailcow.conf
        echo 'HTTP_REDIRECT=n' >> mailcow.conf
        ;;
      ENABLE_IPV6)
        echo '# IPv6 Controller Section' >> mailcow.conf
        echo '# This variable controls the usage of IPv6 within mailcow.' >> mailcow.conf
        echo '# Can either be true or false | Defaults to true' >> mailcow.conf
        echo '# WARNING: MAKE SURE TO PROPERLY CONFIGURE IPv6 ON YOUR HOST FIRST BEFORE ENABLING THIS AS FAULTY CONFIGURATIONS CAN LEAD TO OPEN RELAYS!' >> mailcow.conf
        echo '# A COMPLETE DOCKER STACK REBUILD (compose down && compose up -d) IS NEEDED TO APPLY THIS.' >> mailcow.conf
        echo ENABLE_IPV6=${IPV6_BOOL} >> mailcow.conf
        ;;
      SKIP_CLAMD)
        echo '# Skip ClamAV (clamd-mailcow) anti-virus (Rspamd will auto-detect a missing ClamAV container) - y/n' >> mailcow.conf
        echo 'SKIP_CLAMD=n' >> mailcow.conf
        ;;
      SKIP_OLEFY)
        echo '# Skip Olefy (olefy-mailcow) anti-virus for Office documents (Rspamd will auto-detect a missing Olefy container) - y/n' >> mailcow.conf
        echo 'SKIP_OLEFY=n' >> mailcow.conf
        ;;
      REDISPASS)
        echo "REDISPASS=$(LC_ALL=C </dev/urandom tr -dc A-Za-z0-9 2>/dev/null | head -c 28)" >> mailcow.conf
        ;;
      SOGO_URL_ENCRYPTION_KEY)
        echo '# SOGo URL encryption key (exactly 16 characters, limited to A–Z, a–z, 0–9)' >> mailcow.conf
        echo '# This key is used to encrypt email addresses within SOGo URLs' >> mailcow.conf
        echo "SOGO_URL_ENCRYPTION_KEY=$(LC_ALL=C </dev/urandom tr -dc A-Za-z0-9 2>/dev/null | head -c 16)" >> mailcow.conf
        ;;
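Both values are plain alphanumeric random strings; a Python equivalent of the `tr -dc A-Za-z0-9 </dev/urandom | head -c N` pipeline above (the same idea as the rand_pass helper that appears further down in this diff):

import secrets
import string

def rand_alnum(n):
    # Cryptographically strong choice over the A-Za-z0-9 alphabet
    alphabet = string.ascii_letters + string.digits
    return "".join(secrets.choice(alphabet) for _ in range(n))

print(rand_alnum(28))  # e.g. a REDISPASS value
print(rand_alnum(16))  # e.g. a SOGO_URL_ENCRYPTION_KEY value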
      *)
        echo "${option}=" >> mailcow.conf
        ;;
    esac
  done
}

@@ -48,11 +48,11 @@ if [[ "${SKIP_LETS_ENCRYPT}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
   exec $(readlink -f "$0")
 fi

-log_f "Waiting for Docker API..."
-until ping dockerapi -c1 > /dev/null; do
+log_f "Waiting for Controller .."
+until ping controller -c1 > /dev/null; do
   sleep 1
 done
-log_f "Docker API OK"
+log_f "Controller OK"

 log_f "Waiting for Postfix..."
 until ping postfix -c1 > /dev/null; do
@@ -159,18 +159,6 @@ while true; do
   fi
   if [[ ! -f ${ACME_BASE}/acme/account.pem ]]; then
     log_f "Generating missing Lets Encrypt account key..."
-    if [[ ! -z ${ACME_CONTACT} ]]; then
-      if ! verify_email "${ACME_CONTACT}"; then
-        log_f "Invalid email address, will not start registration!"
-        sleep 365d
-        exec $(readlink -f "$0")
-      else
-        ACME_CONTACT_PARAMETER="--contact mailto:${ACME_CONTACT}"
-        log_f "Valid email address, using ${ACME_CONTACT} for registration"
-      fi
-    else
-      ACME_CONTACT_PARAMETER=""
-    fi
     openssl genrsa 4096 > ${ACME_BASE}/acme/account.pem
   else
     log_f "Using existing Lets Encrypt account key ${ACME_BASE}/acme/account.pem"
@@ -218,7 +206,7 @@ while true; do

   if [[ ${AUTODISCOVER_SAN} == "y" ]]; then
     # Fetch certs for autoconfig and autodiscover subdomains
-    ADDITIONAL_WC_ARR+=('autodiscover' 'autoconfig')
+    ADDITIONAL_WC_ARR+=('autodiscover' 'autoconfig' 'mta-sts')
   fi

   if [[ ${SKIP_IP_CHECK} != "y" ]]; then
@@ -258,6 +246,25 @@ while true; do
       done
       VALIDATED_CONFIG_DOMAINS+=("${VALIDATED_CONFIG_DOMAINS_SUBDOMAINS[*]}")
     done
+
+    # Fetch alias domains where target domain has MTA-STS enabled
+    if [[ ${AUTODISCOVER_SAN} == "y" ]]; then
+      SQL_ALIAS_DOMAINS=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT ad.alias_domain FROM alias_domain ad INNER JOIN mta_sts m ON ad.target_domain = m.domain WHERE ad.active = 1 AND m.active = 1" -Bs)
+      if [[ $? -eq 0 ]]; then
+        while read alias_domain; do
+          if [[ -z "${alias_domain}" ]]; then
+            # ignore empty lines
+            continue
+          fi
+          # Only add mta-sts subdomain for alias domains
+          if [[ "mta-sts.${alias_domain}" != "${MAILCOW_HOSTNAME}" ]]; then
+            if check_domain "mta-sts.${alias_domain}"; then
+              VALIDATED_CONFIG_DOMAINS+=("mta-sts.${alias_domain}")
+            fi
+          fi
+        done <<< "${SQL_ALIAS_DOMAINS}"
+      fi
+    fi
   fi

   if check_domain ${MAILCOW_HOSTNAME}; then
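The reason certificates are requested for mta-sts.<alias_domain> here: per RFC 8461, sending MTAs fetch the MTA-STS policy over HTTPS from exactly that hostname, so it needs a valid certificate. A sketch of such a policy fetch (example.org is a placeholder):

import urllib.request

domain = "example.org"
url = f"https://mta-sts.{domain}/.well-known/mta-sts.txt"
with urllib.request.urlopen(url, timeout=5) as resp:
    print(resp.read().decode())
# Typical policy body:
#   version: STSv1
#   mode: enforce
#   mx: mail.example.org
#   max_age: 86400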
@@ -299,7 +306,7 @@ while true; do
   VALIDATED_CERTIFICATES+=("${CERT_NAME}")

   # obtain server certificate if required
-  ACME_CONTACT_PARAMETER=${ACME_CONTACT_PARAMETER} DOMAINS=${SERVER_SAN_VALIDATED[@]} /srv/obtain-certificate.sh rsa
+  DOMAINS=${SERVER_SAN_VALIDATED[@]} /srv/obtain-certificate.sh rsa
   RETURN="$?"
   if [[ "$RETURN" == "0" ]]; then # 0 = cert created successfully
     CERT_AMOUNT_CHANGED=1

@@ -93,8 +93,8 @@ until dig letsencrypt.org +time=3 +tries=1 @unbound > /dev/null; do
   sleep 2
 done
 log_f "Resolver OK"
-log_f "Using command acme-tiny ${DIRECTORY_URL} ${ACME_CONTACT_PARAMETER} --account-key ${ACME_BASE}/acme/account.pem --disable-check --csr ${CSR} --acme-dir /var/www/acme/"
-ACME_RESPONSE=$(acme-tiny ${DIRECTORY_URL} ${ACME_CONTACT_PARAMETER} \
+log_f "Using command acme-tiny ${DIRECTORY_URL} --account-key ${ACME_BASE}/acme/account.pem --disable-check --csr ${CSR} --acme-dir /var/www/acme/"
+ACME_RESPONSE=$(acme-tiny ${DIRECTORY_URL} \
   --account-key ${ACME_BASE}/acme/account.pem \
   --disable-check \
   --csr ${CSR} \

@@ -2,32 +2,32 @@

 # Reading container IDs
 # Wrapping as array to ensure trimmed content when calling $NGINX etc.
-NGINX=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"nginx-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
-DOVECOT=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"dovecot-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
-POSTFIX=($(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
+NGINX=($(curl --silent --insecure https://controller.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"nginx-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
+DOVECOT=($(curl --silent --insecure https://controller.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"dovecot-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))
+POSTFIX=($(curl --silent --insecure https://controller.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" | tr "\n" " "))

 reload_nginx(){
   echo "Reloading Nginx..."
-  NGINX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${NGINX}/exec -d '{"cmd":"reload", "task":"nginx"}' --silent -H 'Content-type: application/json' | jq -r .type)
+  NGINX_RELOAD_RET=$(curl -X POST --insecure https://controller.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${NGINX}/exec -d '{"cmd":"reload", "task":"nginx"}' --silent -H 'Content-type: application/json' | jq -r .type)
   [[ ${NGINX_RELOAD_RET} != 'success' ]] && { echo "Could not reload Nginx, restarting container..."; restart_container ${NGINX} ; }
 }

 reload_dovecot(){
   echo "Reloading Dovecot..."
-  DOVECOT_RELOAD_RET=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${DOVECOT}/exec -d '{"cmd":"reload", "task":"dovecot"}' --silent -H 'Content-type: application/json' | jq -r .type)
+  DOVECOT_RELOAD_RET=$(curl -X POST --insecure https://controller.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${DOVECOT}/exec -d '{"cmd":"reload", "task":"dovecot"}' --silent -H 'Content-type: application/json' | jq -r .type)
   [[ ${DOVECOT_RELOAD_RET} != 'success' ]] && { echo "Could not reload Dovecot, restarting container..."; restart_container ${DOVECOT} ; }
 }

 reload_postfix(){
   echo "Reloading Postfix..."
-  POSTFIX_RELOAD_RET=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${POSTFIX}/exec -d '{"cmd":"reload", "task":"postfix"}' --silent -H 'Content-type: application/json' | jq -r .type)
+  POSTFIX_RELOAD_RET=$(curl -X POST --insecure https://controller.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${POSTFIX}/exec -d '{"cmd":"reload", "task":"postfix"}' --silent -H 'Content-type: application/json' | jq -r .type)
   [[ ${POSTFIX_RELOAD_RET} != 'success' ]] && { echo "Could not reload Postfix, restarting container..."; restart_container ${POSTFIX} ; }
 }

 restart_container(){
   for container in $*; do
     echo "Restarting ${container}..."
-    C_REST_OUT=$(curl -X POST --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${container}/restart --silent | jq -r '.msg')
+    C_REST_OUT=$(curl -X POST --insecure https://controller.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${container}/restart --silent | jq -r '.msg')
     echo "${C_REST_OUT}"
   done
 }
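The same controller API can also be driven from Python; a hedged sketch mirroring the curl calls above. The base URL assumes the default compose project name, and TLS verification is disabled because the endpoint is self-signed, as the --insecure flags above imply:

import requests  # third-party

BASE = "https://controller.mailcowdockerized_mailcow-network"  # assumed project name

def reload_service(container_id, task):
    # POST /containers/<id>/exec with {"cmd": "reload", "task": <service>},
    # exactly what reload_nginx/reload_dovecot/reload_postfix do via curl
    r = requests.post(
        f"{BASE}/containers/{container_id}/exec",
        json={"cmd": "reload", "task": task},
        verify=False,
        timeout=10,
    )
    return r.json().get("type") == "success"

def restart_container(container_id):
    r = requests.post(f"{BASE}/containers/{container_id}/restart", verify=False, timeout=30)
    return r.json().get("msg")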

@@ -1,3 +1,3 @@
-FROM debian:bookworm-slim
+FROM debian:trixie-slim

-RUN apt update && apt install pigz -y --no-install-recommends
+RUN apt update && apt install pigz zstd -y --no-install-recommends
@@ -1,66 +0,0 @@
import os
import sys
import signal
import ipaddress

def handle_sigterm(signum, frame):
    print("Received SIGTERM, exiting gracefully...")
    sys.exit(0)

def get_mysql_config(service_name):
    db_config = {
        "user": os.getenv("DBUSER") or os.getenv("MYSQL_USER"),
        "password": os.getenv("DBPASS") or os.getenv("MYSQL_PASSWORD"),
        "database": os.getenv("DBNAME") or os.getenv("MYSQL_DATABASE"),
        "connection_timeout": 2,
        "service_table": "service_settings",
        "service_types": [service_name]
    }

    db_host = os.getenv("DB_HOST")
    if db_host.startswith("/"):
        db_config["host"] = "localhost"
        db_config["unix_socket"] = db_host
    else:
        db_config["host"] = db_host

    return db_config

def get_redis_config():
    redis_config = {
        "read_host": os.getenv("REDIS_HOST"),
        "read_port": 6379,
        "write_host": os.getenv("REDIS_SLAVEOF_IP") or os.getenv("REDIS_HOST"),
        "write_port": int(os.getenv("REDIS_SLAVEOF_PORT") or 6379),
        "password": os.getenv("REDISPASS"),
        "db": 0
    }

    return redis_config

def main():
    signal.signal(signal.SIGTERM, handle_sigterm)

    container_name = os.getenv("CONTAINER_NAME")
    service_name = container_name.replace("-mailcow", "").replace("-", "")
    module_name = f"Bootstrap{service_name.capitalize()}"

    try:
        mod = __import__(f"modules.{module_name}", fromlist=[module_name])
        Bootstrap = getattr(mod, module_name)
    except (ImportError, AttributeError) as e:
        print(f"Failed to load bootstrap module for: {container_name} → {module_name}")
        print(str(e))
        sys.exit(1)

    b = Bootstrap(
        container=container_name,
        service=service_name,
        db_config=get_mysql_config(service_name),
        redis_config=get_redis_config()
    )

    b.bootstrap()

if __name__ == "__main__":
    main()
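For reference, the container-name-to-class mapping this removed entry point performed, sketched as the same transformation:

# How CONTAINER_NAME selected a bootstrap class:
for name in ["dovecot-mailcow", "php-fpm-mailcow", "mysql-mailcow"]:
    service = name.replace("-mailcow", "").replace("-", "")
    print(name, "->", f"Bootstrap{service.capitalize()}")
# dovecot-mailcow -> BootstrapDovecot
# php-fpm-mailcow -> BootstrapPhpfpm
# mysql-mailcow -> BootstrapMysql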

@@ -1,827 +0,0 @@
import os
import pwd
import grp
import shutil
import secrets
import string
import subprocess
import time
import socket
import re
import redis
import hashlib
import json
import psutil
import signal
from urllib.parse import quote
from pathlib import Path
import dns.resolver
import mysql.connector

class BootstrapBase:
    def __init__(self, container, service, db_config, redis_config):
        self.container = container
        self.service = service
        self.db_config = db_config
        self.redis_config = redis_config

        self.env = None
        self.env_vars = None
        self.mysql_conn = None
        self.redis_connr = None
        self.redis_connw = None

    def render_config(self, config_dir):
        """
        Renders multiple Jinja2 templates from a config.json file in a given directory.

        Args:
            config_dir (str or Path): Path to the directory containing config.json

        Behavior:
            - Renders each template defined in config.json
            - Writes the result to the specified output path
            - Also copies the rendered file to: <config_dir>/rendered_configs/<relative_output_path>
        """

        config_dir = Path(config_dir)
        config_path = config_dir / "config.json"

        if not config_path.exists():
            print(f"config.json not found in: {config_dir}")
            return

        with config_path.open("r") as f:
            entries = json.load(f)

        for entry in entries:
            template_name = entry["template"]
            output_path = Path(entry["output"])
            clean_blank_lines = entry.get("clean_blank_lines", False)
            if_not_exists = entry.get("if_not_exists", False)

            if if_not_exists and output_path.exists():
                print(f"Skipping {output_path} (already exists)")
                continue

            output_path.parent.mkdir(parents=True, exist_ok=True)

            try:
                template = self.env.get_template(template_name)
            except Exception as e:
                print(f"Template not found: {template_name} ({e})")
                continue

            rendered = template.render(self.env_vars)

            if clean_blank_lines:
                rendered = "\n".join(line for line in rendered.splitlines() if line.strip())

            rendered = rendered.replace('\r\n', '\n').replace('\r', '\n')

            with output_path.open("w") as f:
                f.write(rendered)

            rendered_copy_path = config_dir / "rendered_configs" / output_path.name
            rendered_copy_path.parent.mkdir(parents=True, exist_ok=True)
            self.copy_file(output_path, rendered_copy_path)

            print(f"Rendered {template_name} → {output_path}")

    def prepare_template_vars(self, overwrite_path, extra_vars=None):
        """
        Loads and merges environment variables for Jinja2 templates from multiple sources, and registers custom template filters.

        This method combines variables from:
        1. System environment variables
        2. The MySQL `service_settings` table (filtered by service type if defined)
        3. An optional `extra_vars` dictionary
        4. A JSON overwrite file (if it exists at the given path)

        Also registers custom Jinja2 filters.

        Args:
            overwrite_path (str or Path): Path to a JSON file containing key-value overrides.
            extra_vars (dict, optional): Additional variables to merge into the environment.

        Returns:
            dict: A dictionary containing all resolved template variables.

        Raises:
            Prints errors if database fetch or JSON parsing fails, but does not raise exceptions.
        """

        # 1. setup filters
        self.env.filters['sha1'] = self.sha1_filter
        self.env.filters['urlencode'] = self.urlencode_filter
        self.env.filters['escape_quotes'] = self.escape_quotes_filter

        # 2. Load env vars
        env_vars = dict(os.environ)

        # 3. Load from MySQL
        try:
            cursor = self.mysql_conn.cursor()

            if self.db_config['service_types']:
                placeholders = ','.join(['%s'] * len(self.db_config['service_types']))
                sql = f"SELECT `key`, `value` FROM {self.db_config['service_table']} WHERE `type` IN ({placeholders})"
                cursor.execute(sql, self.db_config['service_types'])
            else:
                cursor.execute(f"SELECT `key`, `value` FROM {self.db_config['service_table']}")

            for key, value in cursor.fetchall():
                env_vars[key] = value

            cursor.close()
        except Exception as e:
            print(f"Failed to fetch DB service settings: {e}")

        # 4. Load extra vars
        if extra_vars:
            env_vars.update(extra_vars)

        # 5. Load overwrites
        overwrite_path = Path(overwrite_path)
        if overwrite_path.exists():
            try:
                with overwrite_path.open("r") as f:
                    overwrite_data = json.load(f)
                env_vars.update(overwrite_data)
            except Exception as e:
                print(f"Failed to parse overwrites: {e}")

        return env_vars

    def set_timezone(self):
        """
        Sets the system timezone based on the TZ environment variable.

        If the TZ variable is set, writes its value to /etc/timezone.
        """

        timezone = os.getenv("TZ")
        if timezone:
            with open("/etc/timezone", "w") as f:
                f.write(timezone + "\n")

    def set_syslog_redis(self):
        """
        Reconfigures syslog-ng to use a Redis slave configuration.

        If the REDIS_SLAVEOF_IP environment variable is set, replaces the syslog-ng config
        with the Redis slave-specific config.
        """

        redis_slave_ip = os.getenv("REDIS_SLAVEOF_IP")
        if redis_slave_ip:
            shutil.copy("/etc/syslog-ng/syslog-ng-redis_slave.conf", "/etc/syslog-ng/syslog-ng.conf")

    def rsync_file(self, src, dst, recursive=False, owner=None, mode=None):
        """
        Copies files or directories using rsync, with optional ownership and permissions.

        Args:
            src (str or Path): Source file or directory.
            dst (str or Path): Destination directory.
            recursive (bool): If True, copies contents recursively.
            owner (tuple): Tuple of (user, group) to set ownership.
            mode (int): File mode (e.g., 0o644) to set permissions after sync.
        """

        src_path = Path(src)
        dst_path = Path(dst)
        dst_path.mkdir(parents=True, exist_ok=True)

        rsync_cmd = ["rsync", "-a"]
        if recursive:
            rsync_cmd.append(str(src_path) + "/")
        else:
            rsync_cmd.append(str(src_path))
        rsync_cmd.append(str(dst_path))

        try:
            subprocess.run(rsync_cmd, check=True)
        except Exception as e:
            print(f"Rsync failed: {e}")

        if owner:
            self.set_owner(dst_path, *owner, recursive=True)
        if mode:
            self.set_permissions(dst_path, mode)

    def set_permissions(self, path, mode):
        """
        Sets file or directory permissions.

        Args:
            path (str or Path): Path to the file or directory.
            mode (int): File mode to apply, e.g., 0o644.

        Raises:
            FileNotFoundError: If the path does not exist.
        """

        file_path = Path(path)
        if not file_path.exists():
            raise FileNotFoundError(f"Cannot chmod: {file_path} does not exist")
        os.chmod(file_path, mode)

    def set_owner(self, path, user, group=None, recursive=False):
        """
        Changes ownership of a file or directory.

        Args:
            path (str or Path): Path to the file or directory.
            user (str or int): Username or UID for new owner.
            group (str or int, optional): Group name or GID; defaults to user's group if not provided.
            recursive (bool): If True and path is a directory, ownership is applied recursively.

        Raises:
            FileNotFoundError: If the path does not exist.
        """

        # Resolve UID
        uid = int(user) if str(user).isdigit() else pwd.getpwnam(user).pw_uid
        # Resolve GID
        if group is not None:
            gid = int(group) if str(group).isdigit() else grp.getgrnam(group).gr_gid
        else:
            gid = uid if isinstance(user, int) or str(user).isdigit() else grp.getgrnam(user).gr_gid

        p = Path(path)
        if not p.exists():
            raise FileNotFoundError(f"{path} does not exist")

        if recursive and p.is_dir():
            for sub_path in p.rglob("*"):
                os.chown(sub_path, uid, gid)
        os.chown(p, uid, gid)

    def fix_permissions(self, path, user=None, group=None, mode=None, recursive=False):
        """
        Sets owner and/or permissions on a file or directory.

        Args:
            path (str or Path): Target path.
            user (str|int, optional): Username or UID.
            group (str|int, optional): Group name or GID.
            mode (int, optional): File mode (e.g. 0o644).
            recursive (bool): Apply recursively if path is a directory.
        """

        if user or group:
            self.set_owner(path, user, group, recursive)
        if mode:
            self.set_permissions(path, mode)

    def move_file(self, src, dst, overwrite=True):
        """
        Moves a file from src to dst, optionally overwriting existing files.

        Args:
            src (str or Path): Source file path.
            dst (str or Path): Destination path.
            overwrite (bool): If False, raises error if dst exists.

        Raises:
            FileNotFoundError: If the source file does not exist.
            FileExistsError: If the destination file exists and overwrite is False.
        """

        src_path = Path(src)
        dst_path = Path(dst)

        if not src_path.exists():
            raise FileNotFoundError(f"Source file does not exist: {src}")

        dst_path.parent.mkdir(parents=True, exist_ok=True)

        if dst_path.exists() and not overwrite:
            raise FileExistsError(f"Destination already exists: {dst} (set overwrite=True to overwrite)")

        shutil.move(str(src_path), str(dst_path))

    def copy_file(self, src, dst, overwrite=True):
        """
        Copies a file from src to dst using shutil.

        Args:
            src (str or Path): Source file path.
            dst (str or Path): Destination file path.
            overwrite (bool): Whether to overwrite the destination if it exists.

        Raises:
            FileNotFoundError: If the source file doesn't exist.
            FileExistsError: If the destination exists and overwrite is False.
            IOError: If the copy operation fails.
        """

        src_path = Path(src)
        dst_path = Path(dst)

        if not src_path.is_file():
            raise FileNotFoundError(f"Source file not found: {src_path}")

        if dst_path.exists() and not overwrite:
            raise FileExistsError(f"Destination exists: {dst_path}")

        dst_path.parent.mkdir(parents=True, exist_ok=True)

        shutil.copy2(src_path, dst_path)

    def remove(self, path, recursive=False, wipe_contents=False, exclude=None):
        """
        Removes a file or directory with optional exclusion logic.

        Args:
            path (str or Path): The file or directory path to remove.
            recursive (bool): If True, directories will be removed recursively.
            wipe_contents (bool): If True and path is a directory, only its contents are removed, not the dir itself.
            exclude (list[str], optional): List of filenames to exclude from deletion.

        Raises:
            FileNotFoundError: If the path does not exist.
            ValueError: If a directory is passed without recursive or wipe_contents.
        """

        path = Path(path)
        exclude = set(exclude or [])

        if not path.exists():
            raise FileNotFoundError(f"Cannot remove: {path} does not exist")

        if wipe_contents and path.is_dir():
            for child in path.iterdir():
                if child.name in exclude:
                    continue
                if child.is_dir():
                    shutil.rmtree(child)
                else:
                    child.unlink()
        elif path.is_file():
            if path.name not in exclude:
                path.unlink()
        elif path.is_dir():
            if recursive:
                shutil.rmtree(path)
            else:
                raise ValueError(f"{path} is a directory. Use recursive=True or wipe_contents=True to remove it.")

    def create_dir(self, path):
        """
        Creates a directory if it does not exist.

        If the directory is missing, it will be created along with any necessary parent directories.

        Args:
            path (str or Path): The directory path to create.
        """

        dir_path = Path(path)
        if not dir_path.exists():
            print(f"Creating directory: {dir_path}")
            dir_path.mkdir(parents=True, exist_ok=True)

    def patch_exists(self, target_file, patch_file, reverse=False):
        """
        Checks whether a patch can be applied (or reversed) to a target file.

        Args:
            target_file (str): File to test the patch against.
            patch_file (str): Patch file to apply.
            reverse (bool): If True, checks whether the patch can be reversed.

        Returns:
            bool: True if patch is applicable, False otherwise.
        """

        cmd = ["patch", "-sfN", "--dry-run", target_file, "<", patch_file]
        if reverse:
            cmd.insert(1, "-R")
        try:
            result = subprocess.run(
                " ".join(cmd),
                shell=True,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL
            )
            return result.returncode == 0
        except Exception as e:
            print(f"Patch dry-run failed: {e}")
            return False

    def apply_patch(self, target_file, patch_file, reverse=False):
        """
        Applies a patch file to a target file.

        Args:
            target_file (str): File to be patched.
            patch_file (str): Patch file containing the diff.
            reverse (bool): If True, applies the patch in reverse (rollback).

        Logs:
            Success or failure of the patching operation.
        """

        cmd = ["patch", target_file, "<", patch_file]
        if reverse:
            cmd.insert(1, "-R")  # the flag must follow the `patch` command itself
        try:
            subprocess.run(" ".join(cmd), shell=True, check=True)
            print(f"Applied patch {'(reverse)' if reverse else ''} to {target_file}")
        except subprocess.CalledProcessError as e:
            print(f"Patch failed: {e}")

    def isYes(self, value):
        """
        Determines whether a given string represents a "yes"-like value.

        Args:
            value (str): Input string to evaluate.

        Returns:
            bool: True if value is "yes" or "y" (case-insensitive), otherwise False.
        """
        return value.lower() in ["yes", "y"]

    def is_port_open(self, host, port):
        """
        Checks whether a TCP port is open on a given host.

        Args:
            host (str): The hostname or IP address to check.
            port (int): The TCP port number to test.

        Returns:
            bool: True if the port is open and accepting connections, False otherwise.
        """

        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(1)
            result = sock.connect_ex((host, port))
            return result == 0

    def resolve_docker_dns_record(self, hostname, record_type="A"):
        """
        Resolves DNS A or AAAA records for a given hostname.

        Args:
            hostname (str): The domain to query.
            record_type (str): "A" for IPv4, "AAAA" for IPv6. Default is "A".

        Returns:
            list[str]: A list of resolved IP addresses.

        Raises:
            Exception: If resolution fails or no results are found.
        """

        try:
            resolver = dns.resolver.Resolver()
            resolver.nameservers = ["127.0.0.11"]
            answers = resolver.resolve(hostname, record_type)
            return [answer.to_text() for answer in answers]
        except Exception as e:
            raise Exception(f"Failed to resolve {record_type} record for {hostname}: {e}")

    def kill_proc(self, process_name):
        """
        Sends SIGTERM to all running processes matching the given name.

        Args:
            process_name (str): Name of the process to terminate.

        Returns:
            int: Number of processes successfully signaled.
        """

        killed = 0
        for proc in psutil.process_iter(['name']):
            try:
                if proc.info['name'] == process_name:
                    proc.send_signal(signal.SIGTERM)
                    killed += 1
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                continue
        return killed

    def connect_mysql(self, socket=None):
        """
        Establishes a connection to the MySQL database using the provided configuration.

        Continuously retries the connection until the database is reachable. Stores
        the connection in `self.mysql_conn` once successful.

        Logs:
            Connection status and retry errors to stdout.

        Args:
            socket (str, optional): Custom UNIX socket path to override the default.
        """

        print("Connecting to MySQL...")
        config = {
            "host": self.db_config['host'],
            "user": self.db_config['user'],
            "password": self.db_config['password'],
            "database": self.db_config['database'],
            "connection_timeout": self.db_config['connection_timeout']
        }
        # unix_socket is only present when DB_HOST points at a socket path
        if self.db_config.get('unix_socket'):
            config["unix_socket"] = socket or self.db_config['unix_socket']

        while True:
            try:
                self.mysql_conn = mysql.connector.connect(**config)
                if self.mysql_conn.is_connected():
                    print("MySQL is up and ready!")
                    break
            except mysql.connector.Error as e:
                print(f"Waiting for MySQL... ({e})")
                time.sleep(2)

    def close_mysql(self):
        """
        Closes the MySQL connection if it's currently open and connected.

        Safe to call even if the connection has already been closed.
        """

        if self.mysql_conn and self.mysql_conn.is_connected():
            self.mysql_conn.close()

    def connect_redis(self, max_retries=10, delay=2):
        """
        Connects to both read and write Redis servers and stores the connections.

        Read server: tries indefinitely until successful.
        Write server: tries up to `max_retries` before giving up.

        Sets:
            self.redis_connr: Redis client for read
            self.redis_connw: Redis client for write
        """

        use_rw = self.redis_config['read_host'] == self.redis_config['write_host'] and self.redis_config['read_port'] == self.redis_config['write_port']

        if use_rw:
            print("Connecting to Redis read server...")
        else:
            print("Connecting to Redis server...")

        while True:
            try:
                clientr = redis.Redis(
                    host=self.redis_config['read_host'],
                    port=self.redis_config['read_port'],
                    password=self.redis_config['password'],
                    db=self.redis_config['db'],
                    decode_responses=True
                )
                if clientr.ping():
                    self.redis_connr = clientr
                    print("Redis read server is up and ready!")
                    if use_rw:
                        break
                    else:
                        self.redis_connw = clientr
                        return
            except redis.RedisError as e:
                print(f"Waiting for Redis read... ({e})")
                time.sleep(delay)

        print("Connecting to Redis write server...")
        for attempt in range(max_retries):
            try:
                clientw = redis.Redis(
                    host=self.redis_config['write_host'],
                    port=self.redis_config['write_port'],
                    password=self.redis_config['password'],
                    db=self.redis_config['db'],
                    decode_responses=True
                )
                if clientw.ping():
                    self.redis_connw = clientw
                    print("Redis write server is up and ready!")
                    return
            except redis.RedisError as e:
                print(f"Waiting for Redis write... (attempt {attempt + 1}/{max_retries}) ({e})")
                time.sleep(delay)
        print("Redis write server is unreachable.")

    def close_redis(self):
        """
        Closes the Redis read/write connections if open.
        """

        if self.redis_connr:
            try:
                self.redis_connr.close()
            except Exception as e:
                print(f"Error while closing Redis read connection: {e}")
            finally:
                self.redis_connr = None

        if self.redis_connw:
            try:
                self.redis_connw.close()
            except Exception as e:
                print(f"Error while closing Redis write connection: {e}")
            finally:
                self.redis_connw = None

    def wait_for_schema_update(self, init_file_path="init_db.inc.php", check_interval=5):
        """
        Waits until the current database schema version matches the expected version
        defined in a PHP initialization file.

        Compares the `version` value in the `versions` table for `application = 'db_schema'`
        with the `$db_version` value extracted from the specified PHP file.

        Args:
            init_file_path (str): Path to the PHP file containing the expected version string.
            check_interval (int): Time in seconds to wait between version checks.

        Logs:
            Current vs. expected schema versions until they match.
        """

        print("Checking database schema version...")

        while True:
            current_version = self._get_current_db_version()
            expected_version = self._get_expected_schema_version(init_file_path)

            if current_version == expected_version:
                print(f"DB schema is up to date: {current_version}")
                break

            print(f"Waiting for schema update... (DB: {current_version}, Expected: {expected_version})")
            time.sleep(check_interval)

    def wait_for_host(self, host, retry_interval=1.0, count=1):
        """
        Waits for a host to respond to ICMP ping.

        Args:
            host (str): Hostname or IP to ping.
            retry_interval (float): Seconds to wait between pings.
            count (int): Number of ping packets to send per check (default 1).
        """
        while True:
            try:
                result = subprocess.run(
                    ["ping", "-c", str(count), host],
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL
                )
                if result.returncode == 0:
                    print(f"{host} is reachable via ping.")
                    break
            except Exception:
                pass
            print(f"Waiting for {host}...")
            time.sleep(retry_interval)

    def wait_for_dns(self, domain, retry_interval=1, timeout=30):
        """
        Waits until the domain resolves via DNS using pure Python (socket).

        Args:
            domain (str): The domain to resolve.
            retry_interval (int): Time (seconds) to wait between attempts.
            timeout (int): Maximum total wait time (seconds).

        Returns:
            bool: True if resolved, False if timed out.
        """

        start = time.time()
        while True:
            try:
                socket.gethostbyname(domain)
                print(f"{domain} is resolving via DNS.")
                return True
            except socket.gaierror:
                pass

            if time.time() - start > timeout:
                print(f"DNS resolution for {domain} timed out.")
                return False

            print(f"Waiting for DNS for {domain}...")
            time.sleep(retry_interval)

    def _get_current_db_version(self):
        """
        Fetches the current schema version from the database.

        Executes a SELECT query on the `versions` table where `application = 'db_schema'`.

        Returns:
            str or None: The current schema version as a string, or None if not found or on error.

        Logs:
            Error message if the query fails.
        """

        try:
            cursor = self.mysql_conn.cursor()
            cursor.execute("SELECT version FROM versions WHERE application = 'db_schema'")
            result = cursor.fetchone()
            cursor.close()
            return result[0] if result else None
        except Exception as e:
            print(f"Error fetching current DB schema version: {e}")
            return None

    def _get_expected_schema_version(self, filepath):
        """
        Extracts the expected database schema version from a PHP initialization file.

        Looks for a line in the form of: `$db_version = "..."` and extracts the version string.

        Args:
            filepath (str): Path to the PHP file containing the `$db_version` definition.

        Returns:
            str or None: The extracted version string, or None if not found or on error.

        Logs:
            Error message if the file cannot be read or parsed.
        """

        try:
            with open(filepath, "r") as f:
                content = f.read()
            match = re.search(r'\$db_version\s*=\s*"([^"]+)"', content)
            if match:
                return match.group(1)
        except Exception as e:
            print(f"Error reading expected schema version from {filepath}: {e}")
        return None
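For clarity, the PHP line this regex targets looks like the following; the version string here is invented:

import re

php_line = '$db_version = "26032025_1500";'
match = re.search(r'\$db_version\s*=\s*"([^"]+)"', php_line)
print(match.group(1))  # 26032025_1500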

    def rand_pass(self, length=22):
        """
        Generates a secure random password using allowed characters.

        Allowed characters include upper/lowercase letters, digits, underscores, and hyphens.

        Args:
            length (int): Length of the password to generate. Default is 22.

        Returns:
            str: A securely generated random password string.
        """

        allowed_chars = string.ascii_letters + string.digits + "_-"
        return ''.join(secrets.choice(allowed_chars) for _ in range(length))

    def run_command(self, command, check=True, shell=False, input_stream=None, log_output=True):
        """
        Executes a shell command and optionally logs output.

        Args:
            command (str or list): Command to run.
            check (bool): Raise if non-zero exit.
            shell (bool): Run in shell.
            input_stream: stdin stream.
            log_output (bool): If True, print output.

        Returns:
            subprocess.CompletedProcess
        """
        try:
            result = subprocess.run(
                command,
                shell=shell,
                check=check,
                stdin=input_stream,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True
            )
            if log_output:
                if result.stdout:
                    print(result.stdout.strip())
                if result.stderr:
                    print(result.stderr.strip())
            return result
        except subprocess.CalledProcessError as e:
            print(f"Command failed with exit code {e.returncode}: {e.cmd}")
            print(e.stderr.strip())
            if check:
                raise
            return e

    def sha1_filter(self, value):
        return hashlib.sha1(value.encode()).hexdigest()

    def urlencode_filter(self, value):
        return quote(value, safe='')

    def escape_quotes_filter(self, value):
        return value.replace('"', r'\"')

@@ -1,60 +0,0 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
from pathlib import Path
import os
import sys
import time

class BootstrapClamd(BootstrapBase):
    def bootstrap(self):
        # Skip Clamd if set
        if self.isYes(os.getenv("SKIP_CLAMD", "")):
            print("SKIP_CLAMD is set, skipping ClamAV startup...")
            time.sleep(365 * 24 * 60 * 60)
            sys.exit(1)

        # Connect to MySQL
        self.connect_mysql()

        print("Cleaning up tmp files...")
        tmp_files = Path("/var/lib/clamav").glob("clamav-*.tmp")
        for tmp_file in tmp_files:
            try:
                self.remove(tmp_file)
                print(f"Removed: {tmp_file}")
            except Exception as e:
                print(f"Failed to remove {tmp_file}: {e}")

        self.create_dir("/run/clamav")
        self.create_dir("/var/lib/clamav")

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Render config")
        self.render_config("/service_config")

        # Fix permissions
        self.set_owner("/var/lib/clamav", "clamav", "clamav", recursive=True)
        self.set_owner("/run/clamav", "clamav", "clamav", recursive=True)
        self.set_permissions("/var/lib/clamav", 0o755)
        for item in Path("/var/lib/clamav").glob("*"):
            self.set_permissions(item, 0o644)
        self.set_permissions("/run/clamav", 0o750)

        # Copying to /etc/clamav to expose file as-is to administrator
        self.copy_file("/var/lib/clamav/whitelist.ign2", "/etc/clamav/whitelist.ign2")
@@ -1,289 +0,0 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
from pathlib import Path
import os
import pwd
import hashlib

class BootstrapDovecot(BootstrapBase):
    def bootstrap(self):
        # Connect to MySQL
        self.connect_mysql()
        self.wait_for_schema_update()

        # Connect to Redis
        self.connect_redis()
        if self.redis_connw:
            self.redis_connw.set("DOVECOT_REPL_HEALTH", 1)

        # Wait for DNS
        self.wait_for_dns("mailcow.email")

        # Create missing directories
        self.create_dir("/etc/dovecot/sql/")
        self.create_dir("/etc/dovecot/auth/")
        self.create_dir("/var/vmail/_garbage")
        self.create_dir("/var/vmail/sieve")
        self.create_dir("/etc/sogo")
        self.create_dir("/var/volatile")

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
            "VALID_CERT_DIRS": self.get_valid_cert_dirs(),
            "RAND_USER": self.rand_pass(),
            "RAND_PASS": self.rand_pass(),
            "RAND_PASS2": self.rand_pass(),
            "ENV_VARS": dict(os.environ)
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Render config")
        self.render_config("/service_config")

        files = [
            "/etc/dovecot/mail_plugins",
            "/etc/dovecot/mail_plugins_imap",
            "/etc/dovecot/mail_plugins_lmtp",
            "/templates/quarantine.tpl"
        ]
        for file in files:
            self.set_permissions(file, 0o644)

        try:
            # Migrate old sieve_after file
            self.move_file("/etc/dovecot/sieve_after", "/var/vmail/sieve/global_sieve_after.sieve")
        except Exception as e:
            pass
        try:
            # Cleanup random user maildirs
            self.remove("/var/vmail/mailcow.local", wipe_contents=True)
        except Exception as e:
            pass
        try:
            # Cleanup PIDs
            self.remove("/tmp/quarantine_notify.pid")
        except Exception as e:
            pass
        try:
            self.remove("/var/run/dovecot/master.pid")
        except Exception as e:
            pass

        # Check permissions of vmail/index/garbage directories.
        # Do not do this every start-up, it may take a very long time. So we use a stat check here.
        files = [
            "/var/vmail",
            "/var/vmail/_garbage",
            "/var/vmail_index"
        ]
        for file in files:
            path = Path(file)
            try:
                stat_info = path.stat()
                current_user = pwd.getpwuid(stat_info.st_uid).pw_name

                if current_user != "vmail":
                    print(f"Ownership of {path} is {current_user}, fixing to vmail:vmail...")
                    self.set_owner(path, user="vmail", group="vmail", recursive=True)
                else:
                    print(f"Ownership of {path} is already correct (vmail)")
            except Exception as e:
                print(f"Error checking ownership of {path}: {e}")

        # Compile sieve scripts
        files = [
            "/var/vmail/sieve/global_sieve_before.sieve",
            "/var/vmail/sieve/global_sieve_after.sieve",
            "/usr/lib/dovecot/sieve/report-spam.sieve",
            "/usr/lib/dovecot/sieve/report-ham.sieve",
        ]
        for file in files:
            self.run_command(["sievec", file], check=False)

        # Fix permissions
        for path in Path("/etc/dovecot/sql").glob("*.conf"):
            self.set_owner(path, "root", "root")
            self.set_permissions(path, 0o640)

        files = [
            "/etc/dovecot/auth/passwd-verify.lua",
            *Path("/etc/dovecot/sql").glob("dovecot-dict-sql-sieve*"),
            *Path("/etc/dovecot/sql").glob("dovecot-dict-sql-quota*")
        ]
        for file in files:
            self.set_owner(file, "root", "dovecot")

        self.set_permissions("/etc/dovecot/auth/passwd-verify.lua", 0o640)

        for file in ["/var/vmail/sieve", "/var/volatile", "/var/vmail_index"]:
            self.set_owner(file, "vmail", "vmail", recursive=True)

        self.run_command(["adduser", "vmail", "tty"])
        self.run_command(["chmod", "g+rw", "/dev/console"])
        self.set_owner("/dev/console", "root", "tty")
        files = [
            "/usr/lib/dovecot/sieve/rspamd-pipe-ham",
            "/usr/lib/dovecot/sieve/rspamd-pipe-spam",
            "/usr/local/bin/imapsync_runner.pl",
            "/usr/local/bin/imapsync",
            "/usr/local/bin/trim_logs.sh",
            "/usr/local/bin/sa-rules.sh",
            "/usr/local/bin/clean_q_aged.sh",
            "/usr/local/bin/maildir_gc.sh",
            "/usr/local/sbin/stop-supervisor.sh",
            "/usr/local/bin/quota_notify.py",
            "/usr/local/bin/repl_health.sh",
            "/usr/local/bin/optimize-fts.sh"
        ]
        for file in files:
            self.set_permissions(file, 0o755)

        # Collect SA rules once now
        self.run_command(["/usr/local/bin/sa-rules.sh"], check=False)

        self.generate_mail_crypt_keys()
        self.cleanup_imapsync_jobs()
        self.generate_guid_version()

    def get_valid_cert_dirs(self):
        """
        Returns a mapping of domains to their certificate directory path.

        Example:
            {
                "example.com": "/etc/ssl/mail/example.com/",
                "www.example.com": "/etc/ssl/mail/example.com/"
            }
        """
        sni_map = {}
        base_path = Path("/etc/ssl/mail")
        if not base_path.exists():
            return sni_map

        for cert_dir in base_path.iterdir():
            if not cert_dir.is_dir():
                continue

            domains_file = cert_dir / "domains"
            cert_file = cert_dir / "cert.pem"
            key_file = cert_dir / "key.pem"

            if not (domains_file.exists() and cert_file.exists() and key_file.exists()):
                continue

            with open(domains_file, "r") as f:
                domains = [line.strip() for line in f if line.strip()]
            for domain in domains:
                sni_map[domain] = str(cert_dir)

        return sni_map

    def generate_mail_crypt_keys(self):
        """
        Ensures mail_crypt EC keypair exists. Generates if missing. Adjusts permissions.
        """

        key_dir = Path("/mail_crypt")
        priv_key = key_dir / "ecprivkey.pem"
        pub_key = key_dir / "ecpubkey.pem"

        # Generate keys if they don't exist or are empty
        if not priv_key.exists() or priv_key.stat().st_size == 0 or \
           not pub_key.exists() or pub_key.stat().st_size == 0:
            self.run_command(
                "openssl ecparam -name prime256v1 -genkey | openssl pkey -out /mail_crypt/ecprivkey.pem",
                shell=True
            )
            self.run_command(
                "openssl pkey -in /mail_crypt/ecprivkey.pem -pubout -out /mail_crypt/ecpubkey.pem",
                shell=True
            )

        # Set ownership to UID 401 (dovecot)
        self.set_owner(priv_key, user='401')
        self.set_owner(pub_key, user='401')
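The same prime256v1 keypair could be produced without shelling out to openssl; a sketch using the third-party cryptography package, which is not what the original code did:

from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec

key = ec.generate_private_key(ec.SECP256R1())  # SECP256R1 is prime256v1
priv_pem = key.private_bytes(
    serialization.Encoding.PEM,
    serialization.PrivateFormat.PKCS8,
    serialization.NoEncryption(),
)
pub_pem = key.public_key().public_bytes(
    serialization.Encoding.PEM,
    serialization.PublicFormat.SubjectPublicKeyInfo,
)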

    def cleanup_imapsync_jobs(self):
        """
        Cleans up stale imapsync locks and resets running status in the database.

        Deletes the imapsync_busy.lock file if present and sets `is_running` to 0
        in the `imapsync` table, if it exists.

        Logs:
            Any issues with file operations or SQL execution.
        """

        lock_file = Path("/tmp/imapsync_busy.lock")
        if lock_file.exists():
            try:
                lock_file.unlink()
            except Exception as e:
                print(f"Failed to remove lock file: {e}")

        try:
            cursor = self.mysql_conn.cursor()
            cursor.execute("SHOW TABLES LIKE 'imapsync'")
            result = cursor.fetchone()
            if result:
                cursor.execute("UPDATE imapsync SET is_running='0'")
                self.mysql_conn.commit()
            cursor.close()
        except Exception as e:
            print(f"Error updating imapsync table: {e}")

    def generate_guid_version(self):
        """
        Waits for the `versions` table to be created, then generates a GUID
        based on the mail hostname and Dovecot's public key and inserts it
        into the `versions` table.

        If the key or hash is missing or malformed, marks it as INVALID.
        """

        try:
            result = self.run_command(["doveconf", "-P"], check=True, log_output=False)
            pubkey_path = None
            for line in result.stdout.splitlines():
                if "mail_crypt_global_public_key" in line:
                    parts = line.split('<')
                    if len(parts) > 1:
                        pubkey_path = parts[1].strip()
                    break

            if pubkey_path and Path(pubkey_path).exists():
                with open(pubkey_path, "rb") as key_file:
                    pubkey_data = key_file.read()

                hostname = self.env_vars.get("MAILCOW_HOSTNAME", "mailcow.local").encode("utf-8")
                concat = hostname + pubkey_data
                guid = hashlib.sha256(concat).hexdigest()

                if len(guid) == 64:
                    version_value = guid
                else:
                    version_value = "INVALID"

                cursor = self.mysql_conn.cursor()
                cursor.execute(
                    "REPLACE INTO versions (application, version) VALUES (%s, %s)",
                    ("GUID", version_value)
                )
                self.mysql_conn.commit()
                cursor.close()
            else:
                print("Public key not found or unreadable. GUID not generated.")
        except Exception as e:
            print(f"Failed to generate or store GUID: {e}")
@@ -1,163 +0,0 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
import os
import time
import subprocess

class BootstrapMysql(BootstrapBase):
    def bootstrap(self):
        dbuser = "root"
        dbpass = os.getenv("MYSQL_ROOT_PASSWORD", "")
        socket = "/tmp/mysql-temp.sock"

        # Check if mysql has been initialized
        if os.path.exists("/var/lib/mysql/mysql/db.frm"):
            print("Starting temporary mysqld for upgrade...")
            self.start_temporary(socket)

            self.connect_mysql(socket)

            print("Running mysql_upgrade...")
            self.upgrade_mysql(dbuser, dbpass, socket)
            print("Checking timezone support with CONVERT_TZ...")
            self.check_and_import_timezone_support(dbuser, dbpass, socket)

            print("Shutting down temporary mysqld...")
            self.close_mysql()
            self.stop_temporary(dbuser, dbpass, socket)

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {}
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Render config")
        self.render_config("/service_config")

    def start_temporary(self, socket):
        """
        Starts a temporary mysqld process in the background using the given UNIX socket.

        The server is started with networking disabled (--skip-networking).

        Args:
            socket (str): Path to the UNIX socket file for MySQL to listen on.

        Returns:
            subprocess.Popen: The running mysqld process object.
        """
        return subprocess.Popen([
            "mysqld",
            "--user=mysql",
            "--skip-networking",
            f"--socket={socket}"
        ])

    def stop_temporary(self, dbuser, dbpass, socket):
        """
        Shuts down the temporary mysqld instance gracefully.

        Uses mariadb-admin to issue a shutdown command to the running server.

        Args:
            dbuser (str): The MySQL username with shutdown privileges (typically 'root').
            dbpass (str): The password for the MySQL user.
            socket (str): Path to the UNIX socket the server is listening on.
        """
        self.run_command([
            "mariadb-admin",
            "shutdown",
            f"--socket={socket}",
            "-u", dbuser,
            f"-p{dbpass}"
        ])

    def upgrade_mysql(self, dbuser, dbpass, socket, max_retries=5, wait_interval=3):
        """
        Executes mysql_upgrade to check and fix any schema or table incompatibilities.

        Retries the upgrade command if it fails, up to a maximum number of attempts.

        Args:
            dbuser (str): MySQL username with privilege to perform the upgrade.
            dbpass (str): Password for the MySQL user.
            socket (str): Path to the MySQL UNIX socket for local communication.
            max_retries (int): Maximum number of attempts before giving up. Default is 5.
            wait_interval (int): Number of seconds to wait between retries. Default is 3.

        Returns:
            bool: True if the upgrade succeeded, False if all attempts failed.
        """
        retries = 0
        while retries < max_retries:
            result = self.run_command([
                "mysql_upgrade",
                "-u", dbuser,
                f"-p{dbpass}",
                f"--socket={socket}"
            ], check=False)

            if result.returncode == 0:
                print("mysql_upgrade completed successfully.")
                return True
            else:
                print(f"mysql_upgrade failed (try {retries+1}/{max_retries})")
                retries += 1
                time.sleep(wait_interval)

        print("mysql_upgrade failed after all retries.")
        return False

    def check_and_import_timezone_support(self, dbuser, dbpass, socket):
        """
        Checks if MySQL supports timezone conversion (CONVERT_TZ).
        If not, it imports timezone info using mysql_tzinfo_to_sql piped into mariadb.
        """
        try:
            cursor = self.mysql_conn.cursor()
            cursor.execute("SELECT CONVERT_TZ('2019-11-02 23:33:00','Europe/Berlin','UTC')")
            result = cursor.fetchone()
            cursor.close()

            if not result or result[0] is None:
                print("Timezone conversion failed or returned NULL. Importing timezone info...")

                # Use mysql_tzinfo_to_sql piped into mariadb
                tz_dump = subprocess.Popen(
                    ["mysql_tzinfo_to_sql", "/usr/share/zoneinfo"],
                    stdout=subprocess.PIPE
                )

                self.run_command([
                    "mariadb",
                    "--socket", socket,
                    "-u", dbuser,
                    f"-p{dbpass}",
                    "mysql"
                ], input_stream=tz_dump.stdout)

                tz_dump.stdout.close()
                tz_dump.wait()

                print("Timezone info successfully imported.")
            else:
                print(f"Timezone support is working. Sample result: {result[0]}")
        except Exception as e:
            print(f"Failed to verify or import timezone info: {e}")
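
run_command (with its check and input_stream keywords) is a BootstrapBase helper that is not part of this diff; a minimal sketch of the assumed semantics, built on subprocess.run:

import subprocess

def run_command(cmd, check=True, input_stream=None):
    # Assumed behavior: run the command, optionally wiring a pipe (e.g. the
    # stdout of mysql_tzinfo_to_sql above) to stdin, and return the
    # CompletedProcess so callers can inspect returncode.
    return subprocess.run(cmd, check=check, stdin=input_stream)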
@@ -1,65 +0,0 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
import os

class BootstrapNginx(BootstrapBase):
    def bootstrap(self):
        # Connect to MySQL
        self.connect_mysql()

        # Wait for hosts (fall back to the default compose service names)
        php_service = os.getenv("PHPFPM_HOST") or "php-fpm-mailcow"
        rspamd_service = os.getenv("RSPAMD_HOST") or "rspamd-mailcow"
        sogo_service = os.getenv("SOGO_HOST") or "sogo-mailcow"
        self.wait_for_host(php_service)
        if not self.isYes(os.getenv("SKIP_RSPAMD", "")):
            self.wait_for_host(rspamd_service)
        if not self.isYes(os.getenv("SKIP_SOGO", "")):
            self.wait_for_host(sogo_service)

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
            "VALID_CERT_DIRS": self.get_valid_cert_dirs(),
            'TRUSTED_PROXIES': [item.strip() for item in os.getenv("TRUSTED_PROXIES", "").split(",") if item.strip()],
            'ADDITIONAL_SERVER_NAMES': [item.strip() for item in os.getenv("ADDITIONAL_SERVER_NAMES", "").split(",") if item.strip()],
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Render config")
        self.render_config("/service_config")

    def get_valid_cert_dirs(self):
        ssl_dir = '/etc/ssl/mail/'
        valid_cert_dirs = []
        for d in os.listdir(ssl_dir):
            full_path = os.path.join(ssl_dir, d)
            if not os.path.isdir(full_path):
                continue

            cert_path = os.path.join(full_path, 'cert.pem')
            key_path = os.path.join(full_path, 'key.pem')
            domains_path = os.path.join(full_path, 'domains')

            if os.path.isfile(cert_path) and os.path.isfile(key_path) and os.path.isfile(domains_path):
                with open(domains_path, 'r') as file:
                    domains = file.read().strip()
                domains_list = domains.split()
                # Only collect extra SNI cert dirs; the primary hostname cert is handled separately.
                if domains_list and os.getenv("MAILCOW_HOSTNAME", "") not in domains_list:
                    valid_cert_dirs.append({
                        'cert_path': full_path + '/',
                        'domains': domains
                    })

        return valid_cert_dirs
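
For template authors: each entry of VALID_CERT_DIRS is a dict with a trailing-slash cert_path and the raw domains string, e.g. (illustrative values):

[{'cert_path': '/etc/ssl/mail/example.org/', 'domains': 'example.org autodiscover.example.org'}]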
@@ -1,202 +0,0 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
import os
import ipaddress

class BootstrapPhpfpm(BootstrapBase):
    def bootstrap(self):
        self.connect_mysql()
        self.connect_redis()

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {}
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        # Prepare Redis and MySQL Database
        # TODO: move to dockerapi
        if self.isYes(os.getenv("MASTER", "")):
            print("We are master, preparing...")
            self.prepare_redis()
            self.setup_apikeys(
                os.getenv("API_ALLOW_FROM", "").strip(),
                os.getenv("API_KEY", "").strip(),
                os.getenv("API_KEY_READ_ONLY", "").strip()
            )
            self.setup_mysql_events()

        print("Render config")
        self.render_config("/service_config")

        self.copy_file("/usr/local/etc/php/conf.d/opcache-recommended.ini", "/php-conf/opcache-recommended.ini")
        self.copy_file("/usr/local/etc/php-fpm.d/z-pools.conf", "/php-conf/pools.conf")
        self.copy_file("/usr/local/etc/php/conf.d/zzz-other.ini", "/php-conf/other.ini")
        self.copy_file("/usr/local/etc/php/conf.d/upload.ini", "/php-conf/upload.ini")
        self.copy_file("/usr/local/etc/php/conf.d/session_store.ini", "/php-conf/session_store.ini")

        self.set_owner("/global_sieve", 82, 82, recursive=True)
        self.set_owner("/web/templates/cache", 82, 82, recursive=True)
        self.remove("/web/templates/cache", wipe_contents=True, exclude=[".gitkeep"])

        print("Running DB init...")
        self.run_command(["php", "-c", "/usr/local/etc/php", "-f", "/web/inc/init_db.inc.php"], check=False)

    def prepare_redis(self):
        print("Setting default Redis keys if missing...")

        # Q_RELEASE_FORMAT
        if self.redis_connw and self.redis_connr.get("Q_RELEASE_FORMAT") is None:
            self.redis_connw.set("Q_RELEASE_FORMAT", "raw")

        # Q_MAX_AGE
        if self.redis_connw and self.redis_connr.get("Q_MAX_AGE") is None:
            self.redis_connw.set("Q_MAX_AGE", 365)

        # PASSWD_POLICY hash defaults
        if self.redis_connw and self.redis_connr.hget("PASSWD_POLICY", "length") is None:
            self.redis_connw.hset("PASSWD_POLICY", mapping={
                "length": 6,
                "chars": 0,
                "special_chars": 0,
                "lowerupper": 0,
                "numbers": 0
            })

        # DOMAIN_MAP
        print("Rebuilding DOMAIN_MAP from MySQL...")
        if self.redis_connw:
            self.redis_connw.delete("DOMAIN_MAP")
        domains = set()
        try:
            cursor = self.mysql_conn.cursor()

            cursor.execute("SELECT domain FROM domain")
            domains.update(row[0] for row in cursor.fetchall())
            cursor.execute("SELECT alias_domain FROM alias_domain")
            domains.update(row[0] for row in cursor.fetchall())

            cursor.close()

            if domains:
                for domain in domains:
                    if self.redis_connw:
                        self.redis_connw.hset("DOMAIN_MAP", domain, 1)
                print(f"{len(domains)} domains added to DOMAIN_MAP.")
            else:
                print("No domains found to insert into DOMAIN_MAP.")
        except Exception as e:
            print(f"Failed to rebuild DOMAIN_MAP: {e}")

    def setup_apikeys(self, api_allow_from, api_key_rw, api_key_ro):
        if not api_allow_from or api_allow_from == "invalid":
            return

        print("Validating API_ALLOW_FROM IPs...")
        ip_list = [ip.strip() for ip in api_allow_from.split(",")]
        validated_ips = []

        for ip in ip_list:
            try:
                ipaddress.ip_network(ip, strict=False)
                validated_ips.append(ip)
            except ValueError:
                continue
        if not validated_ips:
            print("No valid IPs found in API_ALLOW_FROM")
            return

        allow_from_str = ",".join(validated_ips)
        cursor = self.mysql_conn.cursor()
        try:
            if api_key_rw and api_key_rw != "invalid":
                print("Setting RW API key...")
                cursor.execute("DELETE FROM api WHERE access = 'rw'")
                cursor.execute(
                    "INSERT INTO api (api_key, active, allow_from, access) VALUES (%s, %s, %s, %s)",
                    (api_key_rw, 1, allow_from_str, "rw")
                )

            if api_key_ro and api_key_ro != "invalid":
                print("Setting RO API key...")
                cursor.execute("DELETE FROM api WHERE access = 'ro'")
                cursor.execute(
                    "INSERT INTO api (api_key, active, allow_from, access) VALUES (%s, %s, %s, %s)",
                    (api_key_ro, 1, allow_from_str, "ro")
                )

            self.mysql_conn.commit()
            print("API key(s) set successfully.")
        except Exception as e:
            print(f"Failed to configure API keys: {e}")
            self.mysql_conn.rollback()
        finally:
            cursor.close()

    def setup_mysql_events(self):
        print("Creating scheduled MySQL EVENTS...")

        queries = [
            "DROP EVENT IF EXISTS clean_spamalias;",
            """
            CREATE EVENT clean_spamalias
            ON SCHEDULE EVERY 1 DAY
            DO
            DELETE FROM spamalias WHERE validity < UNIX_TIMESTAMP();
            """,
            "DROP EVENT IF EXISTS clean_oauth2;",
            """
            CREATE EVENT clean_oauth2
            ON SCHEDULE EVERY 1 DAY
            DO
            BEGIN
              DELETE FROM oauth_refresh_tokens WHERE expires < NOW();
              DELETE FROM oauth_access_tokens WHERE expires < NOW();
              DELETE FROM oauth_authorization_codes WHERE expires < NOW();
            END;
            """,
            "DROP EVENT IF EXISTS clean_sasl_log;",
            """
            CREATE EVENT clean_sasl_log
            ON SCHEDULE EVERY 1 DAY
            DO
            BEGIN
              DELETE sasl_log.* FROM sasl_log
              LEFT JOIN (
                SELECT username, service, MAX(datetime) AS lastdate
                FROM sasl_log
                GROUP BY username, service
              ) AS last
              ON sasl_log.username = last.username AND sasl_log.service = last.service
              WHERE datetime < DATE_SUB(NOW(), INTERVAL 31 DAY)
              AND datetime < lastdate;

              DELETE FROM sasl_log
              WHERE username NOT IN (SELECT username FROM mailbox)
              AND datetime < DATE_SUB(NOW(), INTERVAL 31 DAY);
            END;
            """
        ]

        try:
            cursor = self.mysql_conn.cursor()
            for query in queries:
                cursor.execute(query)
            self.mysql_conn.commit()
            cursor.close()
            print("MySQL EVENTS created successfully.")
        except Exception as e:
            print(f"Failed to create MySQL EVENTS: {e}")
            self.mysql_conn.rollback()
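
setup_apikeys accepts both plain addresses and CIDR ranges because ipaddress.ip_network(..., strict=False) parses either form (a bare host address is treated as a /32 or /128 network); a quick illustration:

import ipaddress

for candidate in ["192.168.1.5", "10.0.0.0/24", "fd00::/8", "not-an-ip"]:
    try:
        ipaddress.ip_network(candidate, strict=False)
        print(candidate, "-> accepted")
    except ValueError:
        print(candidate, "-> rejected")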
@@ -1,83 +0,0 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
from pathlib import Path

class BootstrapPostfix(BootstrapBase):
    def bootstrap(self):
        # Connect to MySQL
        self.connect_mysql()

        # Wait for DNS
        self.wait_for_dns("mailcow.email")

        self.create_dir("/opt/postfix/conf/sql/")

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
            "VALID_CERT_DIRS": self.get_valid_cert_dirs()
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Set Syslog redis")
        self.set_syslog_redis()

        print("Render config")
        self.render_config("/service_config")

        # Create aliases DB
        self.run_command(["newaliases"])

        # Create SNI config
        self.run_command(["postmap", "-F", "hash:/opt/postfix/conf/sni.map"])

        # Fix Postfix permissions
        self.set_owner("/opt/postfix/conf/sql", user="root", group="postfix", recursive=True)
        self.set_owner("/opt/postfix/conf/custom_transport.pcre", user="root", group="postfix")
        for cf_file in Path("/opt/postfix/conf/sql").glob("*.cf"):
            self.set_permissions(cf_file, 0o640)
        self.set_permissions("/opt/postfix/conf/custom_transport.pcre", 0o640)
        self.set_owner("/var/spool/postfix/public", user="root", group="postdrop", recursive=True)
        self.set_owner("/var/spool/postfix/maildrop", user="root", group="postdrop", recursive=True)
        self.run_command(["postfix", "set-permissions"], check=False)

        # Check for a leftover PID file of a crashed Postfix container before starting a new one
        pid_file = Path("/var/spool/postfix/pid/master.pid")
        if pid_file.exists():
            print(f"Removing stale Postfix PID file: {pid_file}")
            pid_file.unlink()

    def get_valid_cert_dirs(self):
        certs = {}
        base_path = Path("/etc/ssl/mail")
        if not base_path.exists():
            return certs

        for cert_dir in base_path.iterdir():
            if not cert_dir.is_dir():
                continue

            domains_file = cert_dir / "domains"
            cert_file = cert_dir / "cert.pem"
            key_file = cert_dir / "key.pem"

            if not (domains_file.exists() and cert_file.exists() and key_file.exists()):
                continue

            with open(domains_file, "r") as f:
                domains = [line.strip() for line in f if line.strip()]
            if domains:
                certs[str(cert_dir)] = domains

        return certs
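
The sni.map rendered from VALID_CERT_DIRS is the input to Postfix's tls_server_sni_maps; postmap -F stores the referenced key and certificate files by content. An illustrative map line (hypothetical paths):

mail.example.org /etc/ssl/mail/example.org/key.pem /etc/ssl/mail/example.org/cert.pem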
@@ -1,132 +0,0 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
from pathlib import Path
import time
import platform

class BootstrapRspamd(BootstrapBase):
    def bootstrap(self):
        # Connect to MySQL
        self.connect_mysql()

        # Connect to Redis
        self.connect_redis()

        # Get Dovecot IPs
        dovecot_v4 = []
        dovecot_v6 = []
        while not dovecot_v4 and not dovecot_v6:
            try:
                dovecot_v4 = self.resolve_docker_dns_record("dovecot-mailcow", "A")
                dovecot_v6 = self.resolve_docker_dns_record("dovecot-mailcow", "AAAA")
            except Exception as e:
                print(e)
            if not dovecot_v4 and not dovecot_v6:
                print("Waiting for Dovecot IPs...")
                time.sleep(3)

        # Get Rspamd IPs
        rspamd_v4 = []
        rspamd_v6 = []
        while not rspamd_v4 and not rspamd_v6:
            try:
                rspamd_v4 = self.resolve_docker_dns_record("rspamd-mailcow", "A")
                rspamd_v6 = self.resolve_docker_dns_record("rspamd-mailcow", "AAAA")
            except Exception as e:
                print(e)
            if not rspamd_v4 and not rspamd_v6:
                print("Waiting for Rspamd IPs...")
                time.sleep(3)

        # Wait for services
        services = [
            ["php-fpm-mailcow", 9001],
            ["php-fpm-mailcow", 9002]
        ]
        for service in services:
            while not self.is_port_open(service[0], service[1]):
                print(f"Waiting for {service[0]} on port {service[1]}...")
                time.sleep(1)
            print(f"Service {service[0]} on port {service[1]} is ready!")

        for dir_path in ["/etc/rspamd/plugins.d", "/etc/rspamd/custom"]:
            Path(dir_path).mkdir(parents=True, exist_ok=True)
        for file_path in ["/etc/rspamd/rspamd.conf.local", "/etc/rspamd/rspamd.conf.override"]:
            Path(file_path).touch(exist_ok=True)
        self.set_permissions("/var/lib/rspamd", 0o755)

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
            # The loops above only guarantee that one address family resolved,
            # so fall back to an empty string for the other.
            "DOVECOT_V4": dovecot_v4[0] if dovecot_v4 else "",
            "DOVECOT_V6": dovecot_v6[0] if dovecot_v6 else "",
            "RSPAMD_V4": rspamd_v4[0] if rspamd_v4 else "",
            "RSPAMD_V6": rspamd_v6[0] if rspamd_v6 else "",
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Render config")
        self.render_config("/service_config")

        # Fix missing default global maps, if any.
        # These exist in the mailcow UI and should not be removed.
        files = [
            "/etc/rspamd/custom/global_mime_from_blacklist.map",
            "/etc/rspamd/custom/global_rcpt_blacklist.map",
            "/etc/rspamd/custom/global_smtp_from_blacklist.map",
            "/etc/rspamd/custom/global_mime_from_whitelist.map",
            "/etc/rspamd/custom/global_rcpt_whitelist.map",
            "/etc/rspamd/custom/global_smtp_from_whitelist.map",
            "/etc/rspamd/custom/bad_languages.map",
            "/etc/rspamd/custom/sa-rules",
            "/etc/rspamd/custom/dovecot_trusted.map",
            "/etc/rspamd/custom/rspamd_trusted.map",
            "/etc/rspamd/custom/mailcow_networks.map",
            "/etc/rspamd/custom/ip_wl.map",
            "/etc/rspamd/custom/fishy_tlds.map",
            "/etc/rspamd/custom/bad_words.map",
            "/etc/rspamd/custom/bad_asn.map",
            "/etc/rspamd/custom/bad_words_de.map",
            "/etc/rspamd/custom/bulk_header.map",
            "/etc/rspamd/custom/bad_header.map"
        ]
        for file in files:
            path = Path(file)
            path.parent.mkdir(parents=True, exist_ok=True)
            path.touch(exist_ok=True)

        # Fix permissions
        paths_rspamd = [
            "/var/lib/rspamd",
            "/etc/rspamd/local.d",
            "/etc/rspamd/override.d",
            "/etc/rspamd/rspamd.conf.local",
            "/etc/rspamd/rspamd.conf.override",
            "/etc/rspamd/plugins.d"
        ]
        for path in paths_rspamd:
            self.set_owner(path, "_rspamd", "_rspamd", recursive=True)
        self.set_owner("/etc/rspamd/custom", "_rspamd", "_rspamd")
        self.set_permissions("/etc/rspamd/custom", 0o755)

        custom_path = Path("/etc/rspamd/custom")
        for child in custom_path.iterdir():
            if child.is_file():
                self.set_owner(child, 82, 82)
                self.set_permissions(child, 0o644)

        # Provide additional Lua modules
        arch = platform.machine()
        self.run_command(["ln", "-s", f"/usr/lib/{arch}-linux-gnu/liblua5.1-cjson.so.0.0.0", "/usr/lib/rspamd/cjson.so"], check=False)
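
resolve_docker_dns_record is another BootstrapBase helper outside this diff; given the dnspython dependency baked into these images, a plausible sketch is a query against Docker's embedded DNS:

import dns.resolver

def resolve_docker_dns_record(host, record_type):
    # Docker's embedded resolver lives at 127.0.0.11 inside compose networks.
    resolver = dns.resolver.Resolver()
    resolver.nameservers = ["127.0.0.11"]
    try:
        return [rr.to_text() for rr in resolver.resolve(host, record_type)]
    except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
        return []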
@@ -1,138 +0,0 @@
from jinja2 import Environment, FileSystemLoader
from modules.BootstrapBase import BootstrapBase
from pathlib import Path
import os
import sys
import time

class BootstrapSogo(BootstrapBase):
    def bootstrap(self):
        # Skip SOGo if set
        if self.isYes(os.getenv("SKIP_SOGO", "")):
            print("SKIP_SOGO is set, skipping SOGo startup...")
            time.sleep(365 * 24 * 60 * 60)
            sys.exit(1)

        # Connect to MySQL
        self.connect_mysql()

        # Wait until the port is free
        while self.is_port_open(os.getenv("SOGO_HOST"), 20000):
            print("Port 20000 still in use — terminating sogod...")
            self.kill_proc("sogod")
            time.sleep(3)

        # Wait for the schema to reach the expected version
        self.wait_for_schema_update(init_file_path="init_db.inc.php")

        # Setup Jinja2 Environment and load vars
        self.env = Environment(
            loader=FileSystemLoader([
                '/service_config/custom_templates',
                '/service_config/config_templates'
            ]),
            keep_trailing_newline=True,
            lstrip_blocks=True,
            trim_blocks=True
        )
        extra_vars = {
            "SQL_DOMAINS": self.get_domains(),
            "IAM_SETTINGS": self.get_identity_provider_settings()
        }
        self.env_vars = self.prepare_template_vars('/service_config/overwrites.json', extra_vars)

        print("Set Timezone")
        self.set_timezone()

        print("Set Syslog redis")
        self.set_syslog_redis()

        print("Render config")
        self.render_config("/service_config")

        print("Fix permissions")
        self.set_owner("/var/lib/sogo", "sogo", "sogo", recursive=True)
        self.set_permissions("/var/lib/sogo/GNUstep/Defaults/sogod.plist", 0o600)

        # Rename custom logo
        logo_src = Path("/etc/sogo/sogo-full.svg")
        if logo_src.exists():
            print("Set Logo")
            self.move_file(logo_src, "/etc/sogo/custom-fulllogo.svg")

        # Rsync web content
        print("Syncing web content")
        self.rsync_file("/usr/lib/GNUstep/SOGo/", "/sogo_web/", recursive=True)

        # Chown backup path
        self.set_owner("/sogo_backup", "sogo", "sogo", recursive=True)

    def get_domains(self):
        """
        Retrieves a list of domains and their GAL (Global Address List) status.

        Executes a SQL query to select:
        - `domain`
        - a human-readable GAL status ("YES" or "NO")
        - `ldap_gal` as a boolean (True/False)

        Returns:
            list[dict]: A list of dicts with keys: domain, gal_status, ldap_gal.
            Example: [{"domain": "example.com", "gal_status": "YES", "ldap_gal": True}]

        Logs:
            Error messages if the query fails.
        """
        query = """
            SELECT domain,
            CASE gal WHEN '1' THEN 'YES' ELSE 'NO' END AS gal_status,
            ldap_gal = 1 AS ldap_gal
            FROM domain;
        """
        try:
            cursor = self.mysql_conn.cursor()
            cursor.execute(query)
            result = cursor.fetchall()
            cursor.close()

            return [
                {
                    "domain": row[0],
                    "gal_status": row[1],
                    "ldap_gal": bool(row[2])
                }
                for row in result
            ]
        except Exception as e:
            print(f"Error fetching domains: {e}")
            return []

    def get_identity_provider_settings(self):
        """
        Retrieves all key-value identity provider settings.

        Returns:
            dict: Settings in the format { key: value }

        Logs:
            Error messages if the query fails.
        """
        query = "SELECT `key`, `value` FROM identity_provider;"
        try:
            cursor = self.mysql_conn.cursor()
            cursor.execute(query)
            result = cursor.fetchall()
            cursor.close()

            iam_settings = {row[0]: row[1] for row in result}

            # Use .get() so an empty identity_provider table cannot raise a KeyError.
            if iam_settings.get('authsource') == "ldap":
                protocol = "ldaps" if iam_settings.get("use_ssl") else "ldap"
                starttls = "/????!StartTLS" if iam_settings.get("use_tls") else ""
                iam_settings['ldap_url'] = f"{protocol}://{iam_settings['host']}:{iam_settings['port']}{starttls}"

            return iam_settings
        except Exception as e:
            print(f"Error fetching identity provider settings: {e}")
            return {}
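
With the accessors above, an LDAP identity provider row set of host=ldap.example.org, port=389, use_tls=1 (illustrative values) yields the SOGo-style URL:

ldap://ldap.example.org:389/????!StartTLS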
@@ -41,7 +41,7 @@ RUN wget -P /src https://www.clamav.net/downloads/production/clamav-${CLAMD_VERS
    -D ENABLE_MILTER=ON \
    -D ENABLE_MAN_PAGES=OFF \
    -D ENABLE_STATIC_LIB=OFF \
    -D ENABLE_JSON_SHARED=ON \
    && cmake --build . \
    && make DESTDIR="/clamav" -j$(($(nproc) - 1)) install \
    && rm -r "/clamav/usr/lib/pkgconfig/" \
@@ -88,42 +88,23 @@ RUN apk upgrade --no-cache \
    pcre2 \
    zlib \
    libgcc \
    py3-pip \
    && addgroup -S "clamav" && \
    adduser -D -G "clamav" -h "/var/lib/clamav" -s "/bin/false" -S "clamav" && \
    install -d -m 755 -g "clamav" -o "clamav" "/var/log/clamav" && \
    chown -R clamav:clamav /var/lib/clamav

RUN apk add --no-cache --virtual .build-deps \
    gcc \
    musl-dev \
    python3-dev \
    linux-headers \
    && pip install --break-system-packages psutil \
    && apk del .build-deps

RUN pip install --break-system-packages \
    mysql-connector-python \
    jinja2 \
    redis \
    dnspython

COPY --from=builder "/clamav" "/"

# init
COPY clamd.sh /clamd.sh
RUN chmod +x /sbin/tini

COPY data/Dockerfiles/bootstrap /bootstrap
COPY data/Dockerfiles/clamd/docker-entrypoint.sh /docker-entrypoint.sh
COPY data/Dockerfiles/clamd/clamd.sh /clamd.sh
COPY data/Dockerfiles/clamd/healthcheck.sh /healthcheck.sh
COPY data/Dockerfiles/clamd/clamdcheck.sh /usr/local/bin
# healthcheck
COPY healthcheck.sh /healthcheck.sh
COPY clamdcheck.sh /usr/local/bin
RUN chmod +x /healthcheck.sh
RUN chmod +x /usr/local/bin/clamdcheck.sh
HEALTHCHECK --start-period=6m CMD "/healthcheck.sh"

RUN chmod +x /docker-entrypoint.sh \
    /clamd.sh \
    /healthcheck.sh \
    /usr/local/bin/clamdcheck.sh \
    /sbin/tini

ENTRYPOINT ["/docker-entrypoint.sh"]
ENTRYPOINT []
CMD ["/sbin/tini", "-g", "--", "/clamd.sh"]
@@ -1,5 +1,48 @@
#!/bin/bash

if [[ "${SKIP_CLAMD}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  echo "SKIP_CLAMD=y, skipping ClamAV..."
  sleep 365d
  exit 0
fi

# Cleaning up garbage
echo "Cleaning up tmp files..."
rm -rf /var/lib/clamav/tmp.*

# Prepare whitelist
mkdir -p /run/clamav /var/lib/clamav

if [[ -s /etc/clamav/whitelist.ign2 ]]; then
  echo "Copying non-empty whitelist.ign2 to /var/lib/clamav/whitelist.ign2"
  cp /etc/clamav/whitelist.ign2 /var/lib/clamav/whitelist.ign2
fi

if [[ ! -f /var/lib/clamav/whitelist.ign2 ]]; then
  echo "Creating /var/lib/clamav/whitelist.ign2"
  cat <<EOF > /var/lib/clamav/whitelist.ign2
# Please restart ClamAV after changing signatures
Example-Signature.Ignore-1
PUA.Win.Trojan.EmbeddedPDF-1
PUA.Pdf.Trojan.EmbeddedJavaScript-1
PUA.Pdf.Trojan.OpenActionObjectwithJavascript-1
EOF
fi

chown clamav:clamav -R /var/lib/clamav /run/clamav

chmod 755 /var/lib/clamav
chmod 644 -R /var/lib/clamav/*
chmod 750 /run/clamav

stat /var/lib/clamav/whitelist.ign2
dos2unix /var/lib/clamav/whitelist.ign2
sed -i '/^\s*$/d' /var/lib/clamav/whitelist.ign2
# Copying to /etc/clamav to expose file as-is to administrator
cp -p /var/lib/clamav/whitelist.ign2 /etc/clamav/whitelist.ign2

BACKGROUND_TASKS=()

echo "Running freshclam..."
@@ -1,20 +0,0 @@
#!/bin/bash

# Run hooks
for file in /hooks/*; do
  if [ -x "${file}" ]; then
    echo "Running hook ${file}"
    "${file}"
  fi
done

python3 -u /bootstrap/main.py
BOOTSTRAP_EXIT_CODE=$?

if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
  echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting Clamd."
  exit $BOOTSTRAP_EXIT_CODE
fi

echo "Bootstrap succeeded. Starting Clamd..."
exec "$@"
@@ -6,22 +6,29 @@ ARG PIP_BREAK_SYSTEM_PACKAGES=1
WORKDIR /app

RUN apk add --update --no-cache python3 \
    bash \
    py3-pip \
    openssl \
    tzdata \
    py3-psutil \
    py3-redis \
    py3-async-timeout \
    supervisor \
    curl \
    && pip3 install --upgrade pip \
    fastapi \
    uvicorn \
    aiodocker \
    docker
RUN mkdir /app/modules

COPY data/Dockerfiles/dockerapi/docker-entrypoint.sh /app/
COPY data/Dockerfiles/dockerapi/main.py /app/main.py
COPY data/Dockerfiles/dockerapi/modules/ /app/modules/
COPY mailcow-adm/ /app/mailcow-adm/
RUN pip3 install -r /app/mailcow-adm/requirements.txt

COPY api/ /app/api/

COPY docker-entrypoint.sh /app/
COPY supervisord.conf /etc/supervisor/supervisord.conf
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh

ENTRYPOINT ["/bin/sh", "/app/docker-entrypoint.sh"]
CMD ["python", "main.py"]
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
@@ -36,7 +36,7 @@ async def lifespan(app: FastAPI):
    if os.environ['REDIS_SLAVEOF_IP'] != "":
        redis_client = redis = await aioredis.from_url(f"redis://{os.environ['REDIS_SLAVEOF_IP']}:{os.environ['REDIS_SLAVEOF_PORT']}/0", password=os.environ['REDISPASS'])
    else:
        redis_client = redis = await aioredis.from_url(f"redis://{os.environ['REDIS_HOST']}:6379/0", password=os.environ['REDISPASS'])
        redis_client = redis = await aioredis.from_url("redis://redis-mailcow:6379/0", password=os.environ['REDISPASS'])

    # Init docker clients
    sync_docker_client = docker.DockerClient(base_url='unix://var/run/docker.sock', version='auto')
@@ -254,8 +254,8 @@ if __name__ == '__main__':
        app,
        host="0.0.0.0",
        port=443,
        ssl_certfile="/app/dockerapi_cert.pem",
        ssl_keyfile="/app/dockerapi_key.pem",
        ssl_certfile="/app/controller_cert.pem",
        ssl_keyfile="/app/controller_key.pem",
        log_level="info",
        loop="none"
    )
data/Dockerfiles/controller/docker-entrypoint.sh (new executable file, 9 lines)
@@ -0,0 +1,9 @@
#!/bin/bash

# Generate a self-signed certificate for the controller API.
# (The original wrapped this command in backticks, which would make bash
# try to execute the command's output; plain invocation is intended.)
openssl req -x509 -newkey rsa:4096 -sha256 -days 3650 -nodes \
  -keyout /app/controller_key.pem \
  -out /app/controller_cert.pem \
  -subj /CN=controller/O=mailcow \
  -addext subjectAltName=DNS:controller

exec "$@"
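
Since a fresh self-signed certificate is generated on every start, it can be inspected in a running container with, for example:

openssl x509 -in /app/controller_cert.pem -noout -subject -ext subjectAltName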
data/Dockerfiles/controller/mailcow-adm/mailcow-adm.py (new executable file, 61 lines)
@@ -0,0 +1,61 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse
import sys

from models.AliasModel import AliasModel
from models.MailboxModel import MailboxModel
from models.SyncjobModel import SyncjobModel
from models.CalendarModel import CalendarModel
from models.MailerModel import MailerModel
from models.AddressbookModel import AddressbookModel
from models.MaildirModel import MaildirModel
from models.DomainModel import DomainModel
from models.DomainadminModel import DomainadminModel
from models.StatusModel import StatusModel

from modules.Utils import Utils


def main():
    utils = Utils()

    model_map = {
        MailboxModel.parser_command: MailboxModel,
        AliasModel.parser_command: AliasModel,
        SyncjobModel.parser_command: SyncjobModel,
        CalendarModel.parser_command: CalendarModel,
        AddressbookModel.parser_command: AddressbookModel,
        MailerModel.parser_command: MailerModel,
        MaildirModel.parser_command: MaildirModel,
        DomainModel.parser_command: DomainModel,
        DomainadminModel.parser_command: DomainadminModel,
        StatusModel.parser_command: StatusModel
    }

    parser = argparse.ArgumentParser(description="mailcow Admin Tool")
    subparsers = parser.add_subparsers(dest="command", required=True)

    for model in model_map.values():
        model.add_parser(subparsers)

    args = parser.parse_args()

    for cmd, model_cls in model_map.items():
        if args.command == cmd and model_cls.has_required_args(args):
            instance = model_cls(**vars(args))
            action = getattr(instance, args.object, None)
            if callable(action):
                res = action()
                utils.pprint(res)
                sys.exit(0)

    parser.print_help()


if __name__ == "__main__":
    main()
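
Each subcommand maps to a model class, and each positional "object" argument to a method on that class, so invocations look like (illustrative values):

mailcow-adm mailbox add --username user@example.com --password 'secret'
mailcow-adm alias get --id 23
mailcow-adm calendar set_acl --username user@example.com --name Team --sharee-email boss@example.com --acl rw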
@@ -0,0 +1,140 @@
from modules.Sogo import Sogo
from models.BaseModel import BaseModel

class AddressbookModel(BaseModel):
    parser_command = "addressbook"
    required_args = {
        "add": [["username", "name"]],
        "delete": [["username", "name"]],
        "get": [["username", "name"]],
        "set_acl": [["username", "name", "sharee_email", "acl"]],
        "get_acl": [["username", "name"]],
        "delete_acl": [["username", "name", "sharee_email"]],
        "add_contact": [["username", "name", "contact_name", "contact_email", "type"]],
        "delete_contact": [["username", "name", "contact_name"]],
    }

    def __init__(
        self,
        username=None,
        name=None,
        sharee_email=None,
        acl=None,
        subscribe=None,
        ics=None,
        contact_name=None,
        contact_email=None,
        type=None,
        **kwargs
    ):
        self.sogo = Sogo(username)

        # Keep the username around; the error messages below reference it.
        self.username = username
        self.name = name
        self.acl = acl
        self.sharee_email = sharee_email
        self.subscribe = subscribe
        self.ics = ics
        self.contact_name = contact_name
        self.contact_email = contact_email
        self.type = type

    def add(self):
        """
        Add a new addressbook.
        :return: Response from SOGo API.
        """
        return self.sogo.addAddressbook(self.name)

    def set_acl(self):
        """
        Set ACL for the addressbook.
        :return: Response from SOGo API.
        """
        addressbook_id = self.sogo.getAddressbookIdByName(self.name)
        if not addressbook_id:
            print(f"Addressbook '{self.name}' not found for user '{self.username}'.")
            return None
        return self.sogo.setAddressbookACL(addressbook_id, self.sharee_email, self.acl, self.subscribe)

    def delete_acl(self):
        """
        Delete the addressbook ACL.
        :return: Response from SOGo API.
        """
        addressbook_id = self.sogo.getAddressbookIdByName(self.name)
        if not addressbook_id:
            print(f"Addressbook '{self.name}' not found for user '{self.username}'.")
            return None
        return self.sogo.deleteAddressbookACL(addressbook_id, self.sharee_email)

    def get_acl(self):
        """
        Get the ACL for the addressbook.
        :return: Response from SOGo API.
        """
        addressbook_id = self.sogo.getAddressbookIdByName(self.name)
        if not addressbook_id:
            print(f"Addressbook '{self.name}' not found for user '{self.username}'.")
            return None
        return self.sogo.getAddressbookACL(addressbook_id)

    def add_contact(self):
        """
        Add a new contact to the addressbook.
        :return: Response from SOGo API.
        """
        addressbook_id = self.sogo.getAddressbookIdByName(self.name)
        if not addressbook_id:
            print(f"Addressbook '{self.name}' not found for user '{self.username}'.")
            return None
        if self.type == "card":
            return self.sogo.addAddressbookContact(addressbook_id, self.contact_name, self.contact_email)
        elif self.type == "list":
            return self.sogo.addAddressbookContactList(addressbook_id, self.contact_name, self.contact_email)

    def delete_contact(self):
        """
        Delete a contact or contact list from the addressbook.
        :return: Response from SOGo API.
        """
        addressbook_id = self.sogo.getAddressbookIdByName(self.name)
        if not addressbook_id:
            print(f"Addressbook '{self.name}' not found for user '{self.username}'.")
            return None
        return self.sogo.deleteAddressbookItem(addressbook_id, self.contact_name)

    def get(self):
        """
        Retrieve the addressbook list.
        :return: Response from SOGo API.
        """
        return self.sogo.getAddressbookList()

    def delete(self):
        """
        Delete the addressbook.
        :return: Response from SOGo API.
        """
        addressbook_id = self.sogo.getAddressbookIdByName(self.name)
        if not addressbook_id:
            print(f"Addressbook '{self.name}' not found for user '{self.username}'.")
            return None
        return self.sogo.deleteAddressbook(addressbook_id)

    @classmethod
    def add_parser(cls, subparsers):
        parser = subparsers.add_parser(
            cls.parser_command,
            help="Manage addressbooks (add, delete, get, set_acl, get_acl, delete_acl, add_contact, delete_contact)"
        )
        parser.add_argument("object", choices=list(cls.required_args.keys()), help="Action to perform: add, delete, get, set_acl, get_acl, delete_acl, add_contact, delete_contact")
        parser.add_argument("--username", required=True, help="Username of the addressbook owner (e.g. user@example.com)")
        parser.add_argument("--name", help="Addressbook name")
        parser.add_argument("--sharee-email", help="Email address to share the addressbook with")
        parser.add_argument("--acl", help="ACL rights for the sharee (e.g. r, w, rw)")
        parser.add_argument("--subscribe", action='store_true', help="Subscribe the sharee to the addressbook")
        parser.add_argument("--contact-name", help="Name of the contact or contact list to add or delete")
        parser.add_argument("--contact-email", help="Email address of the contact to add")
        parser.add_argument("--type", choices=["card", "list"], help="Type of contact to add: card (single contact) or list (distribution list)")
data/Dockerfiles/controller/mailcow-adm/models/AliasModel.py (new file, 107 lines)
@@ -0,0 +1,107 @@
from modules.Mailcow import Mailcow
from models.BaseModel import BaseModel

class AliasModel(BaseModel):
    parser_command = "alias"
    required_args = {
        "add": [["address", "goto"]],
        "delete": [["id"]],
        "get": [["id"]],
        "edit": [["id"]]
    }

    def __init__(
        self,
        id=None,
        address=None,
        goto=None,
        active=None,
        sogo_visible=None,
        **kwargs
    ):
        self.mailcow = Mailcow()

        self.id = id
        self.address = address
        self.goto = goto
        self.active = active
        self.sogo_visible = sogo_visible

    @classmethod
    def from_dict(cls, data):
        return cls(
            address=data.get("address"),
            goto=data.get("goto"),
            active=data.get("active", None),
            sogo_visible=data.get("sogo_visible", None)
        )

    def getAdd(self):
        """
        Get the alias details as a dictionary for adding, sets default values.
        :return: Dictionary containing alias details.
        """
        alias = {
            "address": self.address,
            "goto": self.goto,
            "active": self.active if self.active is not None else 1,
            "sogo_visible": self.sogo_visible if self.sogo_visible is not None else 0
        }
        return {key: value for key, value in alias.items() if value is not None}

    def getEdit(self):
        """
        Get the alias details as a dictionary for editing, sets no default values.
        :return: Dictionary containing alias details.
        """
        alias = {
            "address": self.address,
            "goto": self.goto,
            "active": self.active,
            "sogo_visible": self.sogo_visible
        }
        return {key: value for key, value in alias.items() if value is not None}

    def get(self):
        """
        Get the alias details from the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.getAlias(self.id)

    def delete(self):
        """
        Delete the alias via the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.deleteAlias(self.id)

    def add(self):
        """
        Add the alias via the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.addAlias(self.getAdd())

    def edit(self):
        """
        Edit the alias via the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.editAlias(self.id, self.getEdit())

    @classmethod
    def add_parser(cls, subparsers):
        parser = subparsers.add_parser(
            cls.parser_command,
            help="Manage aliases (add, delete, get, edit)"
        )
        parser.add_argument("object", choices=list(cls.required_args.keys()), help="Action to perform: add, delete, get, edit")
        parser.add_argument("--id", help="Alias object ID (required for get, edit, delete)")
        parser.add_argument("--address", help="Alias email address (e.g. alias@example.com)")
        parser.add_argument("--goto", help="Destination address(es), comma-separated (e.g. user1@example.com,user2@example.com)")
        parser.add_argument("--active", choices=["1", "0"], help="Activate (1) or deactivate (0) the alias")
        parser.add_argument("--sogo-visible", choices=["1", "0"], help="Show alias in SOGo addressbook (1 = yes, 0 = no)")
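
getAdd and getEdit both end with the same None-filtering comprehension, so only explicitly provided fields reach the API; getAdd additionally fills defaults. For example:

alias = AliasModel(address="alias@example.com", goto="user@example.com")
print(alias.getAdd())   # {'address': 'alias@example.com', 'goto': 'user@example.com', 'active': 1, 'sogo_visible': 0}
print(alias.getEdit())  # {'address': 'alias@example.com', 'goto': 'user@example.com'}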
data/Dockerfiles/controller/mailcow-adm/models/BaseModel.py (new file, 35 lines)
@@ -0,0 +1,35 @@
class BaseModel:
    parser_command = ""
    required_args = {}

    @classmethod
    def has_required_args(cls, args):
        """
        Validate that at least one set of required arguments is fully present.
        """
        object_name = args.object if hasattr(args, "object") else args.get("object")
        required_lists = cls.required_args.get(object_name, False)

        if not required_lists:
            return False

        for required_set in required_lists:
            result = True
            for arg_name in required_set:
                if isinstance(args, dict):
                    if not args.get(arg_name):
                        result = False
                        break
                # argparse sets every defined option as an attribute (None when
                # omitted), so the value has to be checked, not mere presence.
                elif not getattr(args, arg_name, None):
                    result = False
                    break
            if result:
                break

        if not result:
            print(f"Required arguments for '{object_name}': {required_lists}")
        return result

    @classmethod
    def add_parser(cls, subparsers):
        pass
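
required_args maps each action to a list of alternative argument sets; supplying any one set in full is enough. A quick check with a hypothetical subclass and plain dicts:

class DemoModel(BaseModel):
    required_args = {"add": [["username", "password"], ["username", "token"]]}

print(DemoModel.has_required_args({"object": "add", "username": "u", "token": "t"}))  # True
print(DemoModel.has_required_args({"object": "add", "username": "u"}))                # False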
data/Dockerfiles/controller/mailcow-adm/models/CalendarModel.py (new file, 111 lines)
@@ -0,0 +1,111 @@
from modules.Sogo import Sogo
from models.BaseModel import BaseModel

class CalendarModel(BaseModel):
    parser_command = "calendar"
    required_args = {
        "add": [["username", "name"]],
        "delete": [["username", "name"]],
        "get": [["username"]],
        "import_ics": [["username", "name", "ics"]],
        "set_acl": [["username", "name", "sharee_email", "acl"]],
        "get_acl": [["username", "name"]],
        "delete_acl": [["username", "name", "sharee_email"]],
    }

    def __init__(
        self,
        username=None,
        name=None,
        sharee_email=None,
        acl=None,
        subscribe=None,
        ics=None,
        **kwargs
    ):
        self.sogo = Sogo(username)

        # Keep the username around; the error messages below reference it.
        self.username = username
        self.name = name
        self.acl = acl
        self.sharee_email = sharee_email
        self.subscribe = subscribe
        self.ics = ics

    def add(self):
        """
        Add a new calendar.
        :return: Response from SOGo API.
        """
        return self.sogo.addCalendar(self.name)

    def delete(self):
        """
        Delete a calendar.
        :return: Response from SOGo API.
        """
        calendar_id = self.sogo.getCalendarIdByName(self.name)
        if not calendar_id:
            print(f"Calendar '{self.name}' not found for user '{self.username}'.")
            return None
        return self.sogo.deleteCalendar(calendar_id)

    def get(self):
        """
        Get the calendar details.
        :return: Response from SOGo API.
        """
        return self.sogo.getCalendar()

    def set_acl(self):
        """
        Set ACL for the calendar.
        :return: Response from SOGo API.
        """
        calendar_id = self.sogo.getCalendarIdByName(self.name)
        if not calendar_id:
            print(f"Calendar '{self.name}' not found for user '{self.username}'.")
            return None
        return self.sogo.setCalendarACL(calendar_id, self.sharee_email, self.acl, self.subscribe)

    def delete_acl(self):
        """
        Delete the calendar ACL.
        :return: Response from SOGo API.
        """
        calendar_id = self.sogo.getCalendarIdByName(self.name)
        if not calendar_id:
            print(f"Calendar '{self.name}' not found for user '{self.username}'.")
            return None
        return self.sogo.deleteCalendarACL(calendar_id, self.sharee_email)

    def get_acl(self):
        """
        Get the ACL for the calendar.
        :return: Response from SOGo API.
        """
        calendar_id = self.sogo.getCalendarIdByName(self.name)
        if not calendar_id:
            print(f"Calendar '{self.name}' not found for user '{self.username}'.")
            return None
        return self.sogo.getCalendarACL(calendar_id)

    def import_ics(self):
        """
        Import a calendar from an ICS file.
        :return: Response from SOGo API.
        """
        return self.sogo.importCalendar(self.name, self.ics)

    @classmethod
    def add_parser(cls, subparsers):
        parser = subparsers.add_parser(
            cls.parser_command,
            help="Manage calendars (add, delete, get, import_ics, set_acl, get_acl, delete_acl)"
        )
        parser.add_argument("object", choices=list(cls.required_args.keys()), help="Action to perform: add, delete, get, import_ics, set_acl, get_acl, delete_acl")
        parser.add_argument("--username", required=True, help="Username of the calendar owner (e.g. user@example.com)")
        parser.add_argument("--name", help="Calendar name")
        parser.add_argument("--ics", help="Path to ICS file for import")
        parser.add_argument("--sharee-email", help="Email address to share the calendar with")
        parser.add_argument("--acl", help="ACL rights for the sharee (e.g. r, w, rw)")
        parser.add_argument("--subscribe", action='store_true', help="Subscribe the sharee to the calendar")
data/Dockerfiles/controller/mailcow-adm/models/DomainModel.py (new file, 162 lines)
@@ -0,0 +1,162 @@
from modules.Mailcow import Mailcow
from models.BaseModel import BaseModel

class DomainModel(BaseModel):
    parser_command = "domain"
    required_args = {
        "add": [["domain"]],
        "delete": [["domain"]],
        "get": [["domain"]],
        "edit": [["domain"]]
    }

    def __init__(
        self,
        domain=None,
        active=None,
        aliases=None,
        backupmx=None,
        defquota=None,
        description=None,
        mailboxes=None,
        maxquota=None,
        quota=None,
        relay_all_recipients=None,
        rl_frame=None,
        rl_value=None,
        restart_sogo=None,
        tags=None,
        **kwargs
    ):
        self.mailcow = Mailcow()

        self.domain = domain
        self.active = active
        self.aliases = aliases
        self.backupmx = backupmx
        self.defquota = defquota
        self.description = description
        self.mailboxes = mailboxes
        self.maxquota = maxquota
        self.quota = quota
        self.relay_all_recipients = relay_all_recipients
        self.rl_frame = rl_frame
        self.rl_value = rl_value
        self.restart_sogo = restart_sogo
        self.tags = tags

    @classmethod
    def from_dict(cls, data):
        return cls(
            domain=data.get("domain"),
            active=data.get("active", None),
            aliases=data.get("aliases", None),
            backupmx=data.get("backupmx", None),
            defquota=data.get("defquota", None),
            description=data.get("description", None),
            mailboxes=data.get("mailboxes", None),
            maxquota=data.get("maxquota", None),
            quota=data.get("quota", None),
            relay_all_recipients=data.get("relay_all_recipients", None),
            rl_frame=data.get("rl_frame", None),
            rl_value=data.get("rl_value", None),
            restart_sogo=data.get("restart_sogo", None),
            tags=data.get("tags", None)
        )

    def getAdd(self):
        """
        Get the domain details as a dictionary for adding, sets default values.
        :return: Dictionary containing domain details.
        """
        domain = {
            "domain": self.domain,
            "active": self.active if self.active is not None else 1,
            "aliases": self.aliases if self.aliases is not None else 400,
            "backupmx": self.backupmx if self.backupmx is not None else 0,
            "defquota": self.defquota if self.defquota is not None else 3072,
            "description": self.description if self.description is not None else "",
            "mailboxes": self.mailboxes if self.mailboxes is not None else 10,
            "maxquota": self.maxquota if self.maxquota is not None else 10240,
            "quota": self.quota if self.quota is not None else 10240,
            "relay_all_recipients": self.relay_all_recipients if self.relay_all_recipients is not None else 0,
            "rl_frame": self.rl_frame,
            "rl_value": self.rl_value,
            "restart_sogo": self.restart_sogo if self.restart_sogo is not None else 0,
            "tags": self.tags if self.tags is not None else []
        }
        return {key: value for key, value in domain.items() if value is not None}

    def getEdit(self):
        """
        Get the domain details as a dictionary for editing, sets no default values.
        :return: Dictionary containing domain details.
        """
        domain = {
            "domain": self.domain,
            "active": self.active,
            "aliases": self.aliases,
            "backupmx": self.backupmx,
            "defquota": self.defquota,
            "description": self.description,
            "mailboxes": self.mailboxes,
            "maxquota": self.maxquota,
            "quota": self.quota,
            "relay_all_recipients": self.relay_all_recipients,
            "rl_frame": self.rl_frame,
            "rl_value": self.rl_value,
            "restart_sogo": self.restart_sogo,
            "tags": self.tags
        }
        return {key: value for key, value in domain.items() if value is not None}

    def get(self):
        """
        Get the domain details from the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.getDomain(self.domain)

    def delete(self):
        """
        Delete the domain from the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.deleteDomain(self.domain)

    def add(self):
        """
        Add the domain to the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.addDomain(self.getAdd())

    def edit(self):
        """
        Edit the domain in the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.editDomain(self.domain, self.getEdit())

    @classmethod
    def add_parser(cls, subparsers):
        parser = subparsers.add_parser(
            cls.parser_command,
            help="Manage domains (add, delete, get, edit)"
        )
        parser.add_argument("object", choices=list(cls.required_args.keys()), help="Action to perform: add, delete, get, edit")
        parser.add_argument("--domain", required=True, help="Domain name (e.g. domain.tld)")
        parser.add_argument("--active", choices=["1", "0"], help="Activate (1) or deactivate (0) the domain")
        parser.add_argument("--aliases", help="Number of aliases allowed for the domain")
        parser.add_argument("--backupmx", choices=["1", "0"], help="Enable (1) or disable (0) backup MX")
        parser.add_argument("--defquota", help="Default quota for mailboxes in MB")
        parser.add_argument("--description", help="Description of the domain")
        parser.add_argument("--mailboxes", help="Number of mailboxes allowed for the domain")
        parser.add_argument("--maxquota", help="Maximum quota for the domain in MB")
        parser.add_argument("--quota", help="Quota used by the domain in MB")
        parser.add_argument("--relay-all-recipients", choices=["1", "0"], help="Relay all recipients (1 = yes, 0 = no)")
        parser.add_argument("--rl-frame", help="Rate limit frame (e.g., s, m, h)")
        parser.add_argument("--rl-value", help="Rate limit value")
        parser.add_argument("--restart-sogo", help="Restart SOGo after changes (1 = yes, 0 = no)")
        parser.add_argument("--tags", nargs="*", help="Tags for the domain")
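
A typical invocation, with the getAdd defaults filling everything not passed explicitly (illustrative domain):

mailcow-adm domain add --domain example.org --description 'Primary domain' --mailboxes 25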
data/Dockerfiles/controller/mailcow-adm/models/DomainadminModel.py (new file, 106 lines)
@@ -0,0 +1,106 @@
from modules.Mailcow import Mailcow
from models.BaseModel import BaseModel


class DomainadminModel(BaseModel):
    parser_command = "domainadmin"
    required_args = {
        "add": [["username", "domains", "password"]],
        "delete": [["username"]],
        "get": [["username"]],
        "edit": [["username"]]
    }

    def __init__(
        self,
        username=None,
        domains=None,
        password=None,
        active=None,
        **kwargs
    ):
        self.mailcow = Mailcow()

        self.username = username
        self.domains = domains
        self.password = password
        self.password2 = password
        self.active = active

    @classmethod
    def from_dict(cls, data):
        return cls(
            username=data.get("username"),
            domains=data.get("domains"),
            password=data.get("password"),
            active=data.get("active", None)
        )

    def getAdd(self):
        """
        Get the domain admin details as a dictionary for adding; sets default values.
        :return: Dictionary containing domain admin details.
        """
        domainadmin = {
            "username": self.username,
            "domains": self.domains,
            "password": self.password,
            "password2": self.password2,
            "active": self.active if self.active is not None else "1"
        }
        return {key: value for key, value in domainadmin.items() if value is not None}

    def getEdit(self):
        """
        Get the domain admin details as a dictionary for editing; sets no default values.
        :return: Dictionary containing domain admin details.
        """
        domainadmin = {
            "username": self.username,
            "domains": self.domains,
            "password": self.password,
            "password2": self.password2,
            "active": self.active
        }
        return {key: value for key, value in domainadmin.items() if value is not None}

    def get(self):
        """
        Get the domain admin details from the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.getDomainadmin(self.username)

    def delete(self):
        """
        Delete the domain admin from the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.deleteDomainadmin(self.username)

    def add(self):
        """
        Add the domain admin to the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.addDomainadmin(self.getAdd())

    def edit(self):
        """
        Edit the domain admin in the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.editDomainadmin(self.username, self.getEdit())

    @classmethod
    def add_parser(cls, subparsers):
        parser = subparsers.add_parser(
            cls.parser_command,
            help="Manage domain admins (add, delete, get, edit)"
        )
        parser.add_argument("object", choices=list(cls.required_args.keys()), help="Action to perform: add, delete, get, edit")
        parser.add_argument("--username", help="Username for the domain admin")
        parser.add_argument("--domains", help="Comma-separated list of domains")
        parser.add_argument("--password", help="Password for the domain admin")
        parser.add_argument("--active", choices=["1", "0"], help="Activate (1) or deactivate (0) the domain admin")
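A minimal usage sketch of the model above (not part of the diff): construct it with the desired fields, then call the action method. The account values are illustrative, and API credentials are assumed to be configured on the underlying Mailcow client.

from models.DomainadminModel import DomainadminModel

# Hypothetical values for illustration only.
admin = DomainadminModel(username="admin1", domains="example.org", password="s3cret!")
print(admin.add())   # POST /api/v1/add/domain-admin with the getAdd() payload
print(admin.get())   # GET  /api/v1/get/domain-admin/admin1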
data/Dockerfiles/controller/mailcow-adm/models/MailboxModel.py (new file, 164 lines)
@@ -0,0 +1,164 @@
from modules.Mailcow import Mailcow
from models.BaseModel import BaseModel


class MailboxModel(BaseModel):
    parser_command = "mailbox"
    required_args = {
        "add": [["username", "password"]],
        "delete": [["username"]],
        "get": [["username"]],
        "edit": [["username"]]
    }

    def __init__(
        self,
        password=None,
        username=None,
        domain=None,
        local_part=None,
        active=None,
        sogo_access=None,
        name=None,
        authsource=None,
        quota=None,
        force_pw_update=None,
        tls_enforce_in=None,
        tls_enforce_out=None,
        tags=None,
        sender_acl=None,
        **kwargs
    ):
        self.mailcow = Mailcow()

        if username is not None and "@" in username:
            self.username = username
            self.local_part, self.domain = username.split("@")
        else:
            self.username = f"{local_part}@{domain}"
            self.local_part = local_part
            self.domain = domain

        self.password = password
        self.password2 = password
        self.active = active
        self.sogo_access = sogo_access
        self.name = name
        self.authsource = authsource
        self.quota = quota
        self.force_pw_update = force_pw_update
        self.tls_enforce_in = tls_enforce_in
        self.tls_enforce_out = tls_enforce_out
        self.tags = tags
        self.sender_acl = sender_acl

    @classmethod
    def from_dict(cls, data):
        return cls(
            domain=data.get("domain"),
            local_part=data.get("local_part"),
            password=data.get("password"),
            active=data.get("active", None),
            sogo_access=data.get("sogo_access", None),
            name=data.get("name", None),
            authsource=data.get("authsource", None),
            quota=data.get("quota", None),
            force_pw_update=data.get("force_pw_update", None),
            tls_enforce_in=data.get("tls_enforce_in", None),
            tls_enforce_out=data.get("tls_enforce_out", None),
            tags=data.get("tags", None),
            sender_acl=data.get("sender_acl", None)
        )

    def getAdd(self):
        """
        Get the mailbox details as a dictionary for adding; sets default values.
        :return: Dictionary containing mailbox details.
        """
        mailbox = {
            "domain": self.domain,
            "local_part": self.local_part,
            "password": self.password,
            "password2": self.password2,
            "active": self.active if self.active is not None else 1,
            "name": self.name if self.name is not None else "",
            "authsource": self.authsource if self.authsource is not None else "mailcow",
            "quota": self.quota if self.quota is not None else 0,
            "force_pw_update": self.force_pw_update if self.force_pw_update is not None else 0,
            "tls_enforce_in": self.tls_enforce_in if self.tls_enforce_in is not None else 0,
            "tls_enforce_out": self.tls_enforce_out if self.tls_enforce_out is not None else 0,
            "tags": self.tags if self.tags is not None else []
        }
        return {key: value for key, value in mailbox.items() if value is not None}

    def getEdit(self):
        """
        Get the mailbox details as a dictionary for editing; sets no default values.
        :return: Dictionary containing mailbox details.
        """
        mailbox = {
            "domain": self.domain,
            "local_part": self.local_part,
            "password": self.password,
            "password2": self.password2,
            "active": self.active,
            "name": self.name,
            "authsource": self.authsource,
            "quota": self.quota,
            "force_pw_update": self.force_pw_update,
            "tls_enforce_in": self.tls_enforce_in,
            "tls_enforce_out": self.tls_enforce_out,
            "tags": self.tags
        }
        return {key: value for key, value in mailbox.items() if value is not None}

    def get(self):
        """
        Get the mailbox details from the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.getMailbox(self.username)

    def delete(self):
        """
        Delete the mailbox via the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.deleteMailbox(self.username)

    def add(self):
        """
        Add the mailbox via the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.addMailbox(self.getAdd())

    def edit(self):
        """
        Edit the mailbox via the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.editMailbox(self.username, self.getEdit())

    @classmethod
    def add_parser(cls, subparsers):
        parser = subparsers.add_parser(
            cls.parser_command,
            help="Manage mailboxes (add, delete, get, edit)"
        )
        parser.add_argument("object", choices=list(cls.required_args.keys()), help="Action to perform: add, delete, get, edit")
        parser.add_argument("--username", help="Full email address of the mailbox (e.g. user@example.com)")
        parser.add_argument("--password", help="Password for the mailbox (required for add)")
        parser.add_argument("--active", choices=["1", "0"], help="Activate (1) or deactivate (0) the mailbox")
        parser.add_argument("--sogo-access", choices=["1", "0"], help="Redirect mailbox to SOGo after web login (1 = yes, 0 = no)")
        parser.add_argument("--name", help="Display name of the mailbox owner")
        parser.add_argument("--authsource", help="Authentication source (default: mailcow)")
        parser.add_argument("--quota", help="Mailbox quota in bytes (0 = unlimited)")
        parser.add_argument("--force-pw-update", choices=["1", "0"], help="Force password update on next login (1 = yes, 0 = no)")
        parser.add_argument("--tls-enforce-in", choices=["1", "0"], help="Enforce TLS for incoming emails (1 = yes, 0 = no)")
        parser.add_argument("--tls-enforce-out", choices=["1", "0"], help="Enforce TLS for outgoing emails (1 = yes, 0 = no)")
        parser.add_argument("--tags", help="Comma-separated list of tags for the mailbox")
        parser.add_argument("--sender-acl", help="Comma-separated list of allowed sender addresses for this mailbox")
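A sketch of the two construction paths the constructor above supports, either a full address or a local part plus domain; all values are illustrative and not part of the diff.

from models.MailboxModel import MailboxModel

# Hypothetical values for illustration only.
box = MailboxModel(local_part="user", domain="example.org", password="s3cret!", quota=3072)
print(box.username)                   # "user@example.org", assembled in __init__
same = MailboxModel(username="user@example.org", password="s3cret!")
print(same.local_part, same.domain)   # "user" "example.org", split in __init__
print(box.add())                      # unset fields fall back to the getAdd() defaults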
data/Dockerfiles/controller/mailcow-adm/models/MaildirModel.py (new file, 67 lines)
@@ -0,0 +1,67 @@
from modules.Dovecot import Dovecot
from models.BaseModel import BaseModel


class MaildirModel(BaseModel):
    parser_command = "maildir"
    required_args = {
        "encrypt": [],
        "decrypt": [],
        "restore": [["username", "item"], ["list"]]
    }

    def __init__(
        self,
        username=None,
        source_dir=None,
        output_dir=None,
        item=None,
        overwrite=None,
        list=None,
        **kwargs
    ):
        self.dovecot = Dovecot()

        for key, value in kwargs.items():
            setattr(self, key, value)

        self.username = username
        self.source_dir = source_dir
        self.output_dir = output_dir
        self.item = item
        self.overwrite = overwrite
        self.list = list

    def encrypt(self):
        """
        Encrypt the maildir for the specified user or all.
        :return: Response from Dovecot.
        """
        # Fall back to the Dovecot default when --source-dir is not given.
        return self.dovecot.encryptMaildir(self.source_dir or "/var/vmail/", self.output_dir)

    def decrypt(self):
        """
        Decrypt the maildir for the specified user or all.
        :return: Response from Dovecot.
        """
        # Fall back to the Dovecot default when --source-dir is not given.
        return self.dovecot.decryptMaildir(self.source_dir or "/var/vmail/", self.output_dir)

    def restore(self):
        """
        Restore or list maildir data for the specified user.
        :return: Response from Dovecot.
        """
        if self.list:
            return self.dovecot.listDeletedMaildirs()
        return self.dovecot.restoreMaildir(self.username, self.item)

    @classmethod
    def add_parser(cls, subparsers):
        parser = subparsers.add_parser(
            cls.parser_command,
            help="Manage maildir (encrypt, decrypt, restore)"
        )
        parser.add_argument("object", choices=list(cls.required_args.keys()), help="Action to perform: encrypt, decrypt, restore")
        parser.add_argument("--item", help="Item to restore")
        parser.add_argument("--username", help="Username to restore the item to")
        parser.add_argument("--list", action="store_true", help="List items to restore")
        parser.add_argument("--source-dir", help="Path to the source maildir to import/encrypt/decrypt")
        parser.add_argument("--output-dir", help="Directory to store encrypted/decrypted files inside the Dovecot container")
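A sketch of how the restore flow above is driven, assuming the CLI passes parsed argparse options straight into the model; the address and item index are illustrative.

from models.MaildirModel import MaildirModel

# Hypothetical values for illustration only.
lister = MaildirModel(list=True)
for entry in lister.restore():          # --list short-circuits to listDeletedMaildirs()
    print(entry["item"], entry["time"], entry["name"])
print(MaildirModel(username="user@example.org", item="3").restore())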
data/Dockerfiles/controller/mailcow-adm/models/MailerModel.py (new file, 62 lines)
@@ -0,0 +1,62 @@
import json

from models.BaseModel import BaseModel
from modules.Mailer import Mailer


class MailerModel(BaseModel):
    parser_command = "mail"
    required_args = {
        "send": [["sender", "recipient", "subject", "body"]]
    }

    def __init__(
        self,
        sender=None,
        recipient=None,
        subject=None,
        body=None,
        context=None,
        **kwargs
    ):
        self.sender = sender
        self.recipient = recipient
        self.subject = subject
        self.body = body
        self.context = context

    def send(self):
        if self.context is not None:
            try:
                self.context = json.loads(self.context)
            except json.JSONDecodeError as e:
                return f"Invalid context JSON: {e}"
        else:
            self.context = {}

        mailer = Mailer(
            smtp_host="postfix-mailcow",
            smtp_port=25,
            username=self.sender,
            password="",
            use_tls=True
        )
        res = mailer.send_mail(
            subject=self.subject,
            from_addr=self.sender,
            to_addrs=self.recipient.split(","),
            template=self.body,
            context=self.context
        )
        return res

    @classmethod
    def add_parser(cls, subparsers):
        parser = subparsers.add_parser(
            cls.parser_command,
            help="Send emails via SMTP"
        )
        parser.add_argument("object", choices=list(cls.required_args.keys()), help="Action to perform: send")
        parser.add_argument("--sender", required=True, help="Email sender address")
        parser.add_argument("--recipient", required=True, help="Email recipient address (comma-separated for multiple)")
        parser.add_argument("--subject", required=True, help="Email subject")
        parser.add_argument("--body", required=True, help="Email body (Jinja2 template supported)")
        parser.add_argument("--context", help="Context for Jinja2 template rendering (JSON format)")
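A sketch of the send path above: the body is a Jinja2 template rendered with the JSON --context before being handed to the Mailer module. The addresses and template are illustrative.

from models.MailerModel import MailerModel

# Hypothetical values for illustration only.
mail = MailerModel(
    sender="noreply@example.org",
    recipient="user@example.org",
    subject="Quota warning",
    body="<p>Hello {{ name }}, your mailbox is {{ pct }}% full.</p>",
    context='{"name": "Alice", "pct": 91}'
)
print(mail.send())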
data/Dockerfiles/controller/mailcow-adm/models/StatusModel.py (new file, 45 lines)
@@ -0,0 +1,45 @@
from modules.Mailcow import Mailcow
from models.BaseModel import BaseModel


class StatusModel(BaseModel):
    parser_command = "status"
    required_args = {
        "version": [[]],
        "vmail": [[]],
        "containers": [[]]
    }

    def __init__(
        self,
        **kwargs
    ):
        self.mailcow = Mailcow()

    def version(self):
        """
        Get the version of the mailcow instance.
        :return: Response from the mailcow API.
        """
        return self.mailcow.getStatusVersion()

    def vmail(self):
        """
        Get the vmail details from the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.getStatusVmail()

    def containers(self):
        """
        Get the status of containers in the mailcow instance.
        :return: Response from the mailcow API.
        """
        return self.mailcow.getStatusContainers()

    @classmethod
    def add_parser(cls, subparsers):
        parser = subparsers.add_parser(
            cls.parser_command,
            help="Get information about mailcow (version, vmail, containers)"
        )
        parser.add_argument("object", choices=list(cls.required_args.keys()), help="Action to perform: version, vmail, containers")
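The status model is the simplest consumer of the Mailcow client; a minimal sketch:

from models.StatusModel import StatusModel

status = StatusModel()
print(status.version())      # GET /api/v1/get/status/version
print(status.containers())   # GET /api/v1/get/status/containers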
data/Dockerfiles/controller/mailcow-adm/models/SyncjobModel.py (new file, 221 lines)
@@ -0,0 +1,221 @@
from modules.Mailcow import Mailcow
from models.BaseModel import BaseModel


class SyncjobModel(BaseModel):
    parser_command = "syncjob"
    required_args = {
        "add": [["username", "host1", "port1", "user1", "password1", "enc1"]],
        "delete": [["id"]],
        "get": [["username"]],
        "edit": [["id"]],
        "run": [["id"]]
    }

    def __init__(
        self,
        id=None,
        username=None,
        host1=None,
        port1=None,
        user1=None,
        password1=None,
        enc1=None,
        mins_interval=None,
        subfolder2=None,
        maxage=None,
        maxbytespersecond=None,
        timeout1=None,
        timeout2=None,
        exclude=None,
        custom_parameters=None,
        delete2duplicates=None,
        delete1=None,
        delete2=None,
        automap=None,
        skipcrossduplicates=None,
        subscribeall=None,
        active=None,
        force=None,
        **kwargs
    ):
        self.mailcow = Mailcow()

        for key, value in kwargs.items():
            setattr(self, key, value)

        self.id = id
        self.username = username
        self.host1 = host1
        self.port1 = port1
        self.user1 = user1
        self.password1 = password1
        self.enc1 = enc1
        self.mins_interval = mins_interval
        self.subfolder2 = subfolder2
        self.maxage = maxage
        self.maxbytespersecond = maxbytespersecond
        self.timeout1 = timeout1
        self.timeout2 = timeout2
        self.exclude = exclude
        self.custom_parameters = custom_parameters
        self.delete2duplicates = delete2duplicates
        self.delete1 = delete1
        self.delete2 = delete2
        self.automap = automap
        self.skipcrossduplicates = skipcrossduplicates
        self.subscribeall = subscribeall
        self.active = active
        self.force = force

    @classmethod
    def from_dict(cls, data):
        return cls(
            username=data.get("username"),
            host1=data.get("host1"),
            port1=data.get("port1"),
            user1=data.get("user1"),
            password1=data.get("password1"),
            enc1=data.get("enc1"),
            mins_interval=data.get("mins_interval", None),
            subfolder2=data.get("subfolder2", None),
            maxage=data.get("maxage", None),
            maxbytespersecond=data.get("maxbytespersecond", None),
            timeout1=data.get("timeout1", None),
            timeout2=data.get("timeout2", None),
            exclude=data.get("exclude", None),
            custom_parameters=data.get("custom_parameters", None),
            delete2duplicates=data.get("delete2duplicates", None),
            delete1=data.get("delete1", None),
            delete2=data.get("delete2", None),
            automap=data.get("automap", None),
            skipcrossduplicates=data.get("skipcrossduplicates", None),
            subscribeall=data.get("subscribeall", None),
            active=data.get("active", None)
        )

    def getAdd(self):
        """
        Get the sync job details as a dictionary for adding; sets default values.
        :return: Dictionary containing sync job details.
        """
        syncjob = {
            "username": self.username,
            "host1": self.host1,
            "port1": self.port1,
            "user1": self.user1,
            "password1": self.password1,
            "enc1": self.enc1,
            "mins_interval": self.mins_interval if self.mins_interval is not None else 20,
            "subfolder2": self.subfolder2 if self.subfolder2 is not None else "",
            "maxage": self.maxage if self.maxage is not None else 0,
            "maxbytespersecond": self.maxbytespersecond if self.maxbytespersecond is not None else 0,
            "timeout1": self.timeout1 if self.timeout1 is not None else 600,
            "timeout2": self.timeout2 if self.timeout2 is not None else 600,
            "exclude": self.exclude if self.exclude is not None else "(?i)spam|(?i)junk",
            "custom_parameters": self.custom_parameters if self.custom_parameters is not None else "",
            "delete2duplicates": 1 if self.delete2duplicates else 0,
            "delete1": 1 if self.delete1 else 0,
            "delete2": 1 if self.delete2 else 0,
            "automap": 1 if self.automap else 0,
            "skipcrossduplicates": 1 if self.skipcrossduplicates else 0,
            "subscribeall": 1 if self.subscribeall else 0,
            "active": 1 if self.active else 0
        }
        return {key: value for key, value in syncjob.items() if value is not None}

    def getEdit(self):
        """
        Get the sync job details as a dictionary for editing; sets no default values.
        :return: Dictionary containing sync job details.
        """
        syncjob = {
            "username": self.username,
            "host1": self.host1,
            "port1": self.port1,
            "user1": self.user1,
            "password1": self.password1,
            "enc1": self.enc1,
            "mins_interval": self.mins_interval,
            "subfolder2": self.subfolder2,
            "maxage": self.maxage,
            "maxbytespersecond": self.maxbytespersecond,
            "timeout1": self.timeout1,
            "timeout2": self.timeout2,
            "exclude": self.exclude,
            "custom_parameters": self.custom_parameters,
            "delete2duplicates": self.delete2duplicates,
            "delete1": self.delete1,
            "delete2": self.delete2,
            "automap": self.automap,
            "skipcrossduplicates": self.skipcrossduplicates,
            "subscribeall": self.subscribeall,
            "active": self.active
        }
        return {key: value for key, value in syncjob.items() if value is not None}

    def get(self):
        """
        Get the sync job details from the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.getSyncjob(self.username)

    def delete(self):
        """
        Delete the sync job via the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.deleteSyncjob(self.id)

    def add(self):
        """
        Add the sync job via the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.addSyncjob(self.getAdd())

    def edit(self):
        """
        Edit the sync job via the mailcow API.
        :return: Response from the mailcow API.
        """
        return self.mailcow.editSyncjob(self.id, self.getEdit())

    def run(self):
        """
        Run the sync job.
        :return: Response from the mailcow API.
        """
        return self.mailcow.runSyncjob(self.id, force=self.force)

    @classmethod
    def add_parser(cls, subparsers):
        parser = subparsers.add_parser(
            cls.parser_command,
            help="Manage sync jobs (add, delete, get, edit, run)"
        )
        parser.add_argument("object", choices=list(cls.required_args.keys()), help="Action to perform: add, delete, get, edit, run")
        parser.add_argument("--id", help="Syncjob object ID (required for edit, delete, run)")
        parser.add_argument("--username", help="Target mailbox username (e.g. user@example.com)")
        parser.add_argument("--host1", help="Source IMAP server hostname")
        parser.add_argument("--port1", help="Source IMAP server port")
        parser.add_argument("--user1", help="Source IMAP account username")
        parser.add_argument("--password1", help="Source IMAP account password")
        parser.add_argument("--enc1", choices=["PLAIN", "SSL", "TLS"], help="Encryption for source server connection")
        parser.add_argument("--mins-interval", help="Sync interval in minutes (default: 20)")
        parser.add_argument("--subfolder2", help="Destination subfolder (default: empty)")
        parser.add_argument("--maxage", help="Maximum mail age in days (default: 0 = unlimited)")
        parser.add_argument("--maxbytespersecond", help="Maximum bandwidth in bytes/sec (default: 0 = unlimited)")
        parser.add_argument("--timeout1", help="Timeout for source server in seconds (default: 600)")
        parser.add_argument("--timeout2", help="Timeout for destination server in seconds (default: 600)")
        parser.add_argument("--exclude", help="Regex pattern to exclude folders (default: (?i)spam|(?i)junk)")
        parser.add_argument("--custom-parameters", help="Additional imapsync parameters")
        parser.add_argument("--delete2duplicates", choices=["1", "0"], help="Delete duplicates on destination (1 = yes, 0 = no)")
        parser.add_argument("--delete1", choices=["1", "0"], help="Delete mails on source after sync (1 = yes, 0 = no)")
        parser.add_argument("--delete2", choices=["1", "0"], help="Delete mails on destination after sync (1 = yes, 0 = no)")
        parser.add_argument("--automap", choices=["1", "0"], help="Enable folder automapping (1 = yes, 0 = no)")
        parser.add_argument("--skipcrossduplicates", choices=["1", "0"], help="Skip cross-account duplicates (1 = yes, 0 = no)")
        parser.add_argument("--subscribeall", choices=["1", "0"], help="Subscribe to all folders (1 = yes, 0 = no)")
        parser.add_argument("--active", choices=["1", "0"], help="Activate syncjob (1 = yes, 0 = no)")
        parser.add_argument("--force", action="store_true", help="Force the syncjob to run even if it is not active")
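A sketch of adding and then forcing a run of a sync job with the model above; the hosts and credentials are illustrative.

from models.SyncjobModel import SyncjobModel

# Hypothetical values for illustration only.
job = SyncjobModel(
    username="user@example.org",
    host1="imap.old-provider.tld", port1="993",
    user1="user@old-provider.tld", password1="s3cret!", enc1="SSL"
)
print(job.add())                      # unset options fall back to the getAdd() defaults
print(SyncjobModel(id=1, force=True).run())   # runs imapsync inside dovecot-mailcow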
data/Dockerfiles/controller/mailcow-adm/modules/Docker.py (new file, 128 lines)
@@ -0,0 +1,128 @@
import docker
from docker.errors import APIError


class Docker:
    def __init__(self):
        self.client = docker.from_env()

    def exec_command(self, container_name, cmd, user=None):
        """
        Execute a command in a container by its container name.
        :param container_name: The name of the container.
        :param cmd: The command to execute as a list (e.g., ["ls", "-la"]).
        :param user: The user to execute the command as (optional).
        :return: A standardized response with status, output, and exit_code.
        """
        filters = {"name": container_name}

        try:
            for container in self.client.containers.list(filters=filters):
                exec_result = container.exec_run(cmd, user=user)
                return {
                    "status": "success",
                    "exit_code": exec_result.exit_code,
                    "output": exec_result.output.decode("utf-8")
                }
            # No running container matched the name filter.
            return {
                "status": "error",
                "exit_code": "NotFound",
                "output": f"Container '{container_name}' not found."
            }
        except APIError as e:
            return {
                "status": "error",
                "exit_code": "APIError",
                "output": str(e)
            }
        except Exception as e:
            return {
                "status": "error",
                "exit_code": "Exception",
                "output": str(e)
            }

    def start_container(self, container_name):
        """
        Start a container by its container name.
        :param container_name: The name of the container.
        :return: A standardized response with status, output, and exit_code.
        """
        filters = {"name": container_name}

        try:
            # all=True so that stopped containers can be found and started.
            for container in self.client.containers.list(all=True, filters=filters):
                container.start()
                return {
                    "status": "success",
                    "exit_code": "0",
                    "output": f"Container '{container_name}' started successfully."
                }
            return {
                "status": "error",
                "exit_code": "NotFound",
                "output": f"Container '{container_name}' not found."
            }
        except APIError as e:
            return {
                "status": "error",
                "exit_code": "APIError",
                "output": str(e)
            }
        except Exception as e:
            return {
                "status": "error",
                "exit_code": "Exception",
                "output": str(e)
            }

    def stop_container(self, container_name):
        """
        Stop a container by its container name.
        :param container_name: The name of the container.
        :return: A standardized response with status, output, and exit_code.
        """
        filters = {"name": container_name}

        try:
            for container in self.client.containers.list(filters=filters):
                container.stop()
                return {
                    "status": "success",
                    "exit_code": "0",
                    "output": f"Container '{container_name}' stopped successfully."
                }
            return {
                "status": "error",
                "exit_code": "NotFound",
                "output": f"Container '{container_name}' not found."
            }
        except APIError as e:
            return {
                "status": "error",
                "exit_code": "APIError",
                "output": str(e)
            }
        except Exception as e:
            return {
                "status": "error",
                "exit_code": "Exception",
                "output": str(e)
            }

    def restart_container(self, container_name):
        """
        Restart a container by its container name.
        :param container_name: The name of the container.
        :return: A standardized response with status, output, and exit_code.
        """
        filters = {"name": container_name}

        try:
            for container in self.client.containers.list(filters=filters):
                container.restart()
                return {
                    "status": "success",
                    "exit_code": "0",
                    "output": f"Container '{container_name}' restarted successfully."
                }
            return {
                "status": "error",
                "exit_code": "NotFound",
                "output": f"Container '{container_name}' not found."
            }
        except APIError as e:
            return {
                "status": "error",
                "exit_code": "APIError",
                "output": str(e)
            }
        except Exception as e:
            return {
                "status": "error",
                "exit_code": "Exception",
                "output": str(e)
            }
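A sketch of the standardized response contract the wrapper above returns, which the Dovecot and Mailcow modules rely on. The container name matches the mailcow compose service; the command itself is illustrative.

from modules.Docker import Docker

d = Docker()
res = d.exec_command("dovecot-mailcow", ["doveadm", "who"])   # command illustrative
if res["status"] == "success":
    print(res["output"])
else:
    print(f"exec failed ({res['exit_code']}): {res['output']}")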
data/Dockerfiles/controller/mailcow-adm/modules/Dovecot.py (new file, 206 lines)
@@ -0,0 +1,206 @@
import os

from modules.Docker import Docker


class Dovecot:
    def __init__(self):
        self.docker = Docker()

    def decryptMaildir(self, source_dir="/var/vmail/", output_dir=None):
        """
        Decrypt files in /var/vmail using doveadm if they are encrypted.
        :param source_dir: Directory inside the Dovecot container to decrypt files in.
        :param output_dir: Directory inside the Dovecot container to store decrypted files; defaults to overwriting in place.
        """
        private_key = "/mail_crypt/ecprivkey.pem"
        public_key = "/mail_crypt/ecpubkey.pem"

        if output_dir:
            # Ensure the output directory exists inside the container
            mkdir_result = self.docker.exec_command("dovecot-mailcow", f"bash -c 'mkdir -p {output_dir} && chown vmail:vmail {output_dir}'")
            if mkdir_result.get("status") != "success":
                print(f"Error creating output directory: {mkdir_result.get('output')}")
                return

        find_command = [
            "find", source_dir, "-type", "f", "-regextype", "egrep", "-regex", ".*S=.*W=.*"
        ]

        try:
            find_result = self.docker.exec_command("dovecot-mailcow", " ".join(find_command))
            if find_result.get("status") != "success":
                print(f"Error finding files: {find_result.get('output')}")
                return

            files = find_result.get("output", "").splitlines()

            for file in files:
                head_command = f"head -c7 {file}"
                head_result = self.docker.exec_command("dovecot-mailcow", head_command)
                if head_result.get("status") == "success" and head_result.get("output", "").strip() == "CRYPTED":
                    if output_dir:
                        # Preserve the directory structure in the output directory
                        relative_path = os.path.relpath(file, source_dir)
                        output_file = os.path.join(output_dir, relative_path)
                        current_path = output_dir
                        for part in os.path.dirname(relative_path).split(os.sep):
                            current_path = os.path.join(current_path, part)
                            mkdir_result = self.docker.exec_command("dovecot-mailcow", f"bash -c '[ ! -d {current_path} ] && mkdir {current_path} && chown vmail:vmail {current_path}'")
                            if mkdir_result.get("status") != "success":
                                print(f"Error creating directory {current_path}: {mkdir_result.get('output')}")
                                continue
                    else:
                        # Overwrite the original file
                        output_file = file

                    decrypt_command = (
                        f"bash -c 'doveadm fs get compress lz4:1:crypt:private_key_path={private_key}:public_key_path={public_key}:posix:prefix=/ {file} > {output_file}'"
                    )

                    decrypt_result = self.docker.exec_command("dovecot-mailcow", decrypt_command)
                    if decrypt_result.get("status") == "success":
                        print(f"Decrypted {file}")

                        # Verify the file size and set permissions
                        size_check_command = f"bash -c '[ -s {output_file} ] && chmod 600 {output_file} && chown vmail:vmail {output_file} || rm -f {output_file}'"
                        size_check_result = self.docker.exec_command("dovecot-mailcow", size_check_command)
                        if size_check_result.get("status") != "success":
                            print(f"Error setting permissions for {output_file}: {size_check_result.get('output')}\n")

        except Exception as e:
            print(f"Error during decryption: {e}")

        return "Done"

    def encryptMaildir(self, source_dir="/var/vmail/", output_dir=None):
        """
        Encrypt files in /var/vmail using doveadm if they are not already encrypted.
        :param source_dir: Directory inside the Dovecot container to encrypt files in.
        :param output_dir: Directory inside the Dovecot container to store encrypted files; defaults to overwriting in place.
        """
        private_key = "/mail_crypt/ecprivkey.pem"
        public_key = "/mail_crypt/ecpubkey.pem"

        if output_dir:
            # Ensure the output directory exists inside the container
            mkdir_result = self.docker.exec_command("dovecot-mailcow", f"mkdir -p {output_dir}")
            if mkdir_result.get("status") != "success":
                print(f"Error creating output directory: {mkdir_result.get('output')}")
                return

        find_command = [
            "find", source_dir, "-type", "f", "-regextype", "egrep", "-regex", ".*S=.*W=.*"
        ]

        try:
            find_result = self.docker.exec_command("dovecot-mailcow", " ".join(find_command))
            if find_result.get("status") != "success":
                print(f"Error finding files: {find_result.get('output')}")
                return

            files = find_result.get("output", "").splitlines()

            for file in files:
                head_command = f"head -c7 {file}"
                head_result = self.docker.exec_command("dovecot-mailcow", head_command)
                if head_result.get("status") == "success" and head_result.get("output", "").strip() != "CRYPTED":
                    if output_dir:
                        # Preserve the directory structure in the output directory
                        relative_path = os.path.relpath(file, source_dir)
                        output_file = os.path.join(output_dir, relative_path)
                        current_path = output_dir
                        for part in os.path.dirname(relative_path).split(os.sep):
                            current_path = os.path.join(current_path, part)
                            mkdir_result = self.docker.exec_command("dovecot-mailcow", f"bash -c '[ ! -d {current_path} ] && mkdir {current_path} && chown vmail:vmail {current_path}'")
                            if mkdir_result.get("status") != "success":
                                print(f"Error creating directory {current_path}: {mkdir_result.get('output')}")
                                continue
                    else:
                        # Overwrite the original file
                        output_file = file

                    encrypt_command = (
                        f"bash -c 'doveadm fs put crypt private_key_path={private_key}:public_key_path={public_key}:posix:prefix=/ {file} {output_file}'"
                    )

                    encrypt_result = self.docker.exec_command("dovecot-mailcow", encrypt_command)
                    if encrypt_result.get("status") == "success":
                        print(f"Encrypted {file}")

                        # Set permissions
                        permissions_command = f"bash -c 'chmod 600 {output_file} && chown 5000:5000 {output_file}'"
                        permissions_result = self.docker.exec_command("dovecot-mailcow", permissions_command)
                        if permissions_result.get("status") != "success":
                            print(f"Error setting permissions for {output_file}: {permissions_result.get('output')}\n")

        except Exception as e:
            print(f"Error during encryption: {e}")

        return "Done"

    def listDeletedMaildirs(self, source_dir="/var/vmail/_garbage"):
        """
        List deleted maildirs in the specified garbage directory.
        :param source_dir: Directory to search for deleted maildirs.
        :return: List of maildirs.
        """
        list_command = ["bash", "-c", f"ls -la {source_dir}"]

        try:
            result = self.docker.exec_command("dovecot-mailcow", list_command)
            if result.get("status") != "success":
                print(f"Error listing deleted maildirs: {result.get('output')}")
                return []

            lines = result.get("output", "").splitlines()
            maildirs = {}

            for idx, line in enumerate(lines):
                parts = line.split()
                if "_" in line:
                    folder_name = parts[-1]
                    time, maildir = folder_name.split("_", 1)

                    if maildir.endswith("_index"):
                        main_item = maildir[:-6]
                        if main_item in maildirs:
                            maildirs[main_item]["has_index"] = True
                    else:
                        maildirs[maildir] = {"item": idx, "time": time, "name": maildir, "has_index": False}

            return list(maildirs.values())

        except Exception as e:
            print(f"Error during listing deleted maildirs: {e}")
            return []

    def restoreMaildir(self, username, item, source_dir="/var/vmail/_garbage"):
        """
        Restore a maildir item for a specific user from the deleted maildirs.
        :param username: Username to restore the item to.
        :param item: Item to restore (e.g., mailbox, folder).
        :param source_dir: Directory containing deleted maildirs.
        :return: Response from Dovecot.
        """
        username_splitted = username.split("@")
        maildirs = self.listDeletedMaildirs()

        maildir = None
        for mdir in maildirs:
            if mdir["item"] == int(item):
                maildir = mdir
                break
        if not maildir:
            return {"status": "error", "message": "Maildir not found."}

        restore_command = f"mv {source_dir}/{maildir['time']}_{maildir['name']} /var/vmail/{username_splitted[1]}/{username_splitted[0]}"
        restore_index_command = f"mv {source_dir}/{maildir['time']}_{maildir['name']}_index /var/vmail_index/{username}"

        result = self.docker.exec_command("dovecot-mailcow", ["bash", "-c", restore_command])
        if result.get("status") != "success":
            return {"status": "error", "message": "Failed to restore maildir."}

        result = self.docker.exec_command("dovecot-mailcow", ["bash", "-c", restore_index_command])
        if result.get("status") != "success":
            return {"status": "error", "message": "Failed to restore maildir index."}

        return "Done"
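A sketch tying the list and restore steps above together, assuming the numeric item index comes from a prior listing; the address is illustrative.

from modules.Dovecot import Dovecot

dc = Dovecot()
# Entries come from /var/vmail/_garbage via docker exec into dovecot-mailcow.
for entry in dc.listDeletedMaildirs():
    print(entry["item"], entry["time"], entry["name"], entry["has_index"])
print(dc.restoreMaildir("user@example.org", item="2"))   # values illustrative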
data/Dockerfiles/controller/mailcow-adm/modules/Mailcow.py (new file, 457 lines)
@@ -0,0 +1,457 @@
import requests
import urllib3
import os
import mysql.connector

from modules.Docker import Docker


class Mailcow:
    def __init__(self):
        self.apiUrl = "/api/v1"
        self.ignore_ssl_errors = True

        self.baseUrl = f"https://{os.getenv('IPv4_NETWORK', '172.22.1')}.247:{os.getenv('HTTPS_PORT', '443')}"
        self.host = os.getenv("MAILCOW_HOSTNAME", "")
        self.apiKey = ""
        if self.ignore_ssl_errors:
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

        self.db_config = {
            'user': os.getenv('DBUSER'),
            'password': os.getenv('DBPASS'),
            'database': os.getenv('DBNAME'),
            'unix_socket': '/var/run/mysqld/mysqld.sock',
        }

        self.docker = Docker()

    # API Functions
    def addDomain(self, domain):
        """
        Add a domain to the mailcow instance.
        :param domain: Dictionary containing domain details.
        :return: Response from the mailcow API.
        """
        return self.post('/add/domain', domain)

    def addMailbox(self, mailbox):
        """
        Add a mailbox to the mailcow instance.
        :param mailbox: Dictionary containing mailbox details.
        :return: Response from the mailcow API.
        """
        return self.post('/add/mailbox', mailbox)

    def addAlias(self, alias):
        """
        Add an alias to the mailcow instance.
        :param alias: Dictionary containing alias details.
        :return: Response from the mailcow API.
        """
        return self.post('/add/alias', alias)

    def addSyncjob(self, syncjob):
        """
        Add a sync job to the mailcow instance.
        :param syncjob: Dictionary containing sync job details.
        :return: Response from the mailcow API.
        """
        return self.post('/add/syncjob', syncjob)

    def addDomainadmin(self, domainadmin):
        """
        Add a domain admin to the mailcow instance.
        :param domainadmin: Dictionary containing domain admin details.
        :return: Response from the mailcow API.
        """
        return self.post('/add/domain-admin', domainadmin)

    def deleteDomain(self, domain):
        """
        Delete a domain from the mailcow instance.
        :param domain: Name of the domain to delete.
        :return: Response from the mailcow API.
        """
        items = [domain]
        return self.post('/delete/domain', items)

    def deleteAlias(self, id):
        """
        Delete an alias from the mailcow instance.
        :param id: ID of the alias to delete.
        :return: Response from the mailcow API.
        """
        items = [id]
        return self.post('/delete/alias', items)

    def deleteSyncjob(self, id):
        """
        Delete a sync job from the mailcow instance.
        :param id: ID of the sync job to delete.
        :return: Response from the mailcow API.
        """
        items = [id]
        return self.post('/delete/syncjob', items)

    def deleteMailbox(self, mailbox):
        """
        Delete a mailbox from the mailcow instance.
        :param mailbox: Name of the mailbox to delete.
        :return: Response from the mailcow API.
        """
        items = [mailbox]
        return self.post('/delete/mailbox', items)

    def deleteDomainadmin(self, username):
        """
        Delete a domain admin from the mailcow instance.
        :param username: Username of the domain admin to delete.
        :return: Response from the mailcow API.
        """
        items = [username]
        return self.post('/delete/domain-admin', items)

    def post(self, endpoint, data):
        """
        Make a POST request to the mailcow API.
        :param endpoint: The API endpoint to post to.
        :param data: Data to be sent in the POST request.
        :return: Response from the mailcow API.
        """
        url = f"{self.baseUrl}{self.apiUrl}/{endpoint.lstrip('/')}"
        headers = {
            "Content-Type": "application/json",
            "Host": self.host
        }
        if self.apiKey:
            headers["X-Api-Key"] = self.apiKey
        response = requests.post(
            url,
            json=data,
            headers=headers,
            verify=not self.ignore_ssl_errors
        )
        response.raise_for_status()
        return response.json()

    def getDomain(self, domain):
        """
        Get a domain from the mailcow instance.
        :param domain: Name of the domain to get.
        :return: Response from the mailcow API.
        """
        return self.get(f'/get/domain/{domain}')

    def getMailbox(self, username):
        """
        Get a mailbox from the mailcow instance.
        :param username: Full email address of the mailbox (e.g. user@example.com).
        :return: Response from the mailcow API.
        """
        return self.get(f'/get/mailbox/{username}')

    def getAlias(self, id):
        """
        Get an alias from the mailcow instance.
        :param id: ID of the alias to get.
        :return: Response from the mailcow API.
        """
        return self.get(f'/get/alias/{id}')

    def getSyncjob(self, id):
        """
        Get a sync job from the mailcow instance.
        :param id: ID of the sync job to get.
        :return: Response from the mailcow API.
        """
        return self.get(f'/get/syncjobs/{id}')

    def getDomainadmin(self, username):
        """
        Get a domain admin from the mailcow instance.
        :param username: Username of the domain admin to get.
        :return: Response from the mailcow API.
        """
        return self.get(f'/get/domain-admin/{username}')

    def getStatusVersion(self):
        """
        Get the version of the mailcow instance.
        :return: Response from the mailcow API.
        """
        return self.get('/get/status/version')

    def getStatusVmail(self):
        """
        Get the vmail status from the mailcow instance.
        :return: Response from the mailcow API.
        """
        return self.get('/get/status/vmail')

    def getStatusContainers(self):
        """
        Get the status of containers from the mailcow instance.
        :return: Response from the mailcow API.
        """
        return self.get('/get/status/containers')

    def get(self, endpoint, params=None):
        """
        Make a GET request to the mailcow API.
        :param endpoint: The API endpoint to get from.
        :param params: Parameters to be sent in the GET request.
        :return: Response from the mailcow API.
        """
        url = f"{self.baseUrl}{self.apiUrl}/{endpoint.lstrip('/')}"
        headers = {
            "Content-Type": "application/json",
            "Host": self.host
        }
        if self.apiKey:
            headers["X-Api-Key"] = self.apiKey
        response = requests.get(
            url,
            params=params,
            headers=headers,
            verify=not self.ignore_ssl_errors
        )
        response.raise_for_status()
        return response.json()

    def editDomain(self, domain, attributes):
        """
        Edit an existing domain in the mailcow instance.
        :param domain: Name of the domain to edit.
        :param attributes: Dictionary containing the new domain attributes.
        """
        items = [domain]
        return self.edit('/edit/domain', items, attributes)

    def editMailbox(self, mailbox, attributes):
        """
        Edit an existing mailbox in the mailcow instance.
        :param mailbox: Name of the mailbox to edit.
        :param attributes: Dictionary containing the new mailbox attributes.
        """
        items = [mailbox]
        return self.edit('/edit/mailbox', items, attributes)

    def editAlias(self, alias, attributes):
        """
        Edit an existing alias in the mailcow instance.
        :param alias: Name of the alias to edit.
        :param attributes: Dictionary containing the new alias attributes.
        """
        items = [alias]
        return self.edit('/edit/alias', items, attributes)

    def editSyncjob(self, syncjob, attributes):
        """
        Edit an existing sync job in the mailcow instance.
        :param syncjob: ID of the sync job to edit.
        :param attributes: Dictionary containing the new sync job attributes.
        """
        items = [syncjob]
        return self.edit('/edit/syncjob', items, attributes)

    def editDomainadmin(self, username, attributes):
        """
        Edit an existing domain admin in the mailcow instance.
        :param username: Username of the domain admin to edit.
        :param attributes: Dictionary containing the new domain admin attributes.
        """
        items = [username]
        return self.edit('/edit/domain-admin', items, attributes)

    def edit(self, endpoint, items, attributes):
        """
        Make a POST request to edit items in the mailcow API.
        :param endpoint: The API endpoint to post to.
        :param items: List of items to edit.
        :param attributes: Dictionary containing the new attributes for the items.
        :return: Response from the mailcow API.
        """
        url = f"{self.baseUrl}{self.apiUrl}/{endpoint.lstrip('/')}"
        headers = {
            "Content-Type": "application/json",
            "Host": self.host
        }
        if self.apiKey:
            headers["X-Api-Key"] = self.apiKey
        data = {
            "items": items,
            "attr": attributes
        }
        response = requests.post(
            url,
            json=data,
            headers=headers,
            verify=not self.ignore_ssl_errors
        )
        response.raise_for_status()
        return response.json()

    # System Functions
    def runSyncjob(self, id, force=False):
        """
        Run a sync job.
        :param id: ID of the sync job to run.
        :param force: Run the job even if it is not active.
        :return: Response from the imapsync script.
        """
        creds_path = "/app/sieve.creds"

        conn = mysql.connector.connect(**self.db_config)
        cursor = conn.cursor(dictionary=True)

        with open(creds_path, 'r') as file:
            master_user, master_pass = file.read().strip().split(':')

        query = ("SELECT * FROM imapsync WHERE id = %s")
        cursor.execute(query, (id,))

        success = False
        syncjob = cursor.fetchone()
        if not syncjob:
            cursor.close()
            conn.close()
            return f"Sync job with ID {id} not found."
        if syncjob['active'] == 0 and not force:
            cursor.close()
            conn.close()
            return f"Sync job with ID {id} is not active."

        enc1_flag = "--tls1" if syncjob['enc1'] == "TLS" else "--ssl1" if syncjob['enc1'] == "SSL" else None

        # Passwords are written to temporary passfiles inside the container
        # and removed again after the run.
        passfile1_path = f"/tmp/passfile1_{id}.txt"
        passfile2_path = f"/tmp/passfile2_{id}.txt"
        passfile1_cmd = [
            "sh", "-c",
            f"echo {syncjob['password1']} > {passfile1_path}"
        ]
        passfile2_cmd = [
            "sh", "-c",
            f"echo {master_pass} > {passfile2_path}"
        ]

        self.docker.exec_command("dovecot-mailcow", passfile1_cmd)
        self.docker.exec_command("dovecot-mailcow", passfile2_cmd)

        imapsync_cmd = [
            "/usr/local/bin/imapsync",
            "--tmpdir", "/tmp",
            "--nofoldersizes",
            "--addheader"
        ]

        if int(syncjob['timeout1']) > 0:
            imapsync_cmd.extend(['--timeout1', str(syncjob['timeout1'])])
        if int(syncjob['timeout2']) > 0:
            imapsync_cmd.extend(['--timeout2', str(syncjob['timeout2'])])
        if syncjob['exclude']:
            imapsync_cmd.extend(['--exclude', syncjob['exclude']])
        if syncjob['subfolder2']:
            imapsync_cmd.extend(['--subfolder2', syncjob['subfolder2']])
        if int(syncjob['maxage']) > 0:
            imapsync_cmd.extend(['--maxage', str(syncjob['maxage'])])
        if int(syncjob['maxbytespersecond']) > 0:
            imapsync_cmd.extend(['--maxbytespersecond', str(syncjob['maxbytespersecond'])])
        if int(syncjob['delete2duplicates']) == 1:
            imapsync_cmd.append("--delete2duplicates")
        if int(syncjob['subscribeall']) == 1:
            imapsync_cmd.append("--subscribeall")
        if int(syncjob['delete1']) == 1:
            imapsync_cmd.append("--delete")
        if int(syncjob['delete2']) == 1:
            imapsync_cmd.append("--delete2")
        if int(syncjob['automap']) == 1:
            imapsync_cmd.append("--automap")
        if int(syncjob['skipcrossduplicates']) == 1:
            imapsync_cmd.append("--skipcrossduplicates")
        if enc1_flag:
            imapsync_cmd.append(enc1_flag)

        imapsync_cmd.extend([
            "--host1", syncjob['host1'],
            "--user1", syncjob['user1'],
            "--passfile1", passfile1_path,
            "--port1", str(syncjob['port1']),
            "--host2", "localhost",
            "--user2", f"{syncjob['user2']}*{master_user}",
            "--passfile2", passfile2_path
        ])

        if syncjob['dry'] == 1:
            imapsync_cmd.append("--dry")

        imapsync_cmd.extend([
            "--no-modulesversion",
            "--noreleasecheck"
        ])

        try:
            cursor.execute("UPDATE imapsync SET is_running = 1, success = NULL, exit_status = NULL WHERE id = %s", (id,))
            conn.commit()

            result = self.docker.exec_command("dovecot-mailcow", imapsync_cmd)
            print(result)

            success = result['status'] == "success" and result['exit_code'] == 0
            cursor.execute(
                "UPDATE imapsync SET returned_text = %s, success = %s, exit_status = %s WHERE id = %s",
                (result['output'], int(success), result['exit_code'], id)
            )
            conn.commit()

        except Exception as e:
            cursor.execute(
                "UPDATE imapsync SET returned_text = %s, success = 0 WHERE id = %s",
                (str(e), id)
            )
            conn.commit()

        finally:
            cursor.execute("UPDATE imapsync SET last_run = NOW(), is_running = 0 WHERE id = %s", (id,))
            conn.commit()

        delete_passfile1_cmd = [
            "sh", "-c",
            f"rm -f {passfile1_path}"
        ]
        delete_passfile2_cmd = [
            "sh", "-c",
            f"rm -f {passfile2_path}"
        ]
        self.docker.exec_command("dovecot-mailcow", delete_passfile1_cmd)
        self.docker.exec_command("dovecot-mailcow", delete_passfile2_cmd)

        cursor.close()
        conn.close()

        return "Sync job completed successfully." if success else "Sync job failed."
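A sketch of the client above used directly, assuming an API key has been provisioned (self.apiKey is empty by default, so how it gets set is an assumption); the payloads are illustrative.

from modules.Mailcow import Mailcow

mc = Mailcow()
mc.apiKey = "REPLACE-ME"                                  # assumed to be provisioned
print(mc.getStatusVersion())                              # GET /api/v1/get/status/version
mc.addDomain({"domain": "example.org", "active": "1"})    # values illustrative
mc.editMailbox("user@example.org", {"quota": 2048})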
data/Dockerfiles/controller/mailcow-adm/modules/Mailer.py (new file, 64 lines)
@@ -0,0 +1,64 @@
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from jinja2 import Environment, BaseLoader


class Mailer:
    def __init__(self, smtp_host, smtp_port, username, password, use_tls=True):
        self.smtp_host = smtp_host
        self.smtp_port = smtp_port
        self.username = username
        self.password = password
        self.use_tls = use_tls
        self.server = None
        self.env = Environment(loader=BaseLoader())

    def connect(self):
        print("Connecting to the SMTP server...")
        self.server = smtplib.SMTP(self.smtp_host, self.smtp_port)
        if self.use_tls:
            self.server.starttls()
            print("TLS activated!")
        if self.username and self.password:
            self.server.login(self.username, self.password)
            print("Authenticated!")

    def disconnect(self):
        if self.server:
            try:
                if self.server.sock:
                    self.server.quit()
            except smtplib.SMTPServerDisconnected:
                pass
            finally:
                self.server = None

    def render_inline_template(self, template_string, context):
        template = self.env.from_string(template_string)
        return template.render(context)

    def send_mail(self, subject, from_addr, to_addrs, template, context=None):
        # Avoid a mutable default argument; treat a missing context as empty.
        context = context or {}
        try:
            if template == "":
                print("Cannot send email, template is empty!")
                return "Failed: Template is empty."

            body = self.render_inline_template(template, context)

            msg = MIMEMultipart()
            msg['From'] = from_addr
            msg['To'] = ', '.join(to_addrs) if isinstance(to_addrs, list) else to_addrs
            msg['Subject'] = subject
            msg.attach(MIMEText(body, 'html'))

            self.connect()
            self.server.sendmail(from_addr, to_addrs, msg.as_string())
            return f"Success: Email sent to {msg['To']}"
        except Exception as e:
            print(f"Error during send_mail: {type(e).__name__}: {e}")
            return f"Failed: {type(e).__name__}: {e}"
        finally:
            self.disconnect()
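A sketch of the Mailer used standalone, outside the MailerModel wrapper shown earlier; the host matches the compose service, while the addresses and template are illustrative.

from modules.Mailer import Mailer

# Hypothetical addresses for illustration only.
mailer = Mailer(smtp_host="postfix-mailcow", smtp_port=25, username="", password="", use_tls=True)
print(mailer.send_mail(
    subject="Hello",
    from_addr="noreply@example.org",
    to_addrs=["user@example.org"],
    template="<p>Hi {{ name }}</p>",
    context={"name": "Alice"}
))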
data/Dockerfiles/controller/mailcow-adm/modules/Reader.py (new file, 51 lines)
@@ -0,0 +1,51 @@
import ast
import csv

from jinja2 import Environment


def split_at(value, sep, idx):
    try:
        return value.split(sep)[idx]
    except Exception:
        return ''


class Reader:
    """
    Reader class to handle reading of CSV files and mapping rows through
    Jinja2 templates for mailcow.
    """

    def __init__(self):
        pass

    def read_csv(self, file_path, delimiter=',', encoding='iso-8859-1'):
        """
        Read a CSV file and return a list of dictionaries.
        Each dictionary represents a row in the CSV file.
        :param file_path: Path to the CSV file.
        :param delimiter: Delimiter used in the CSV file (default: ',').
        :param encoding: Encoding of the CSV file (default: 'iso-8859-1').
        """
        with open(file_path, mode='r', encoding=encoding) as file:
            reader = csv.DictReader(file, delimiter=delimiter)
            reader.fieldnames = [h.replace(" ", "_") if h else h for h in reader.fieldnames]
            return [row for row in reader]

    def map_csv_data(self, data, mapping_file_path, encoding='iso-8859-1'):
        """
        Map CSV data to a specific structure based on the provided Jinja2 template file.
        :param data: List of dictionaries representing CSV rows.
        :param mapping_file_path: Path to the Jinja2 template file.
        :return: List of dictionaries with mapped data.
        """
        with open(mapping_file_path, 'r', encoding=encoding) as tpl_file:
            template_content = tpl_file.read()
        env = Environment()
        env.filters['split_at'] = split_at
        template = env.from_string(template_content)

        mapped_data = []
        for row in data:
            rendered = template.render(**row)
            try:
                # literal_eval instead of eval: only Python literals
                # (dicts, lists, strings, numbers) can be produced.
                mapped_row = ast.literal_eval(rendered)
            except Exception:
                mapped_row = rendered
            mapped_data.append(mapped_row)
        return mapped_data
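A sketch of the intended CSV import pipeline, assuming a mapping template that renders one Python dict literal per row; the file names are illustrative.

from modules.Reader import Reader

# Hypothetical file names for illustration only.
reader = Reader()
rows = reader.read_csv("users.csv", delimiter=";")
mapped = reader.map_csv_data(rows, "mailbox_mapping.j2")
print(mapped[0])   # e.g. {"local_part": "user", "domain": "example.org"}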
data/Dockerfiles/controller/mailcow-adm/modules/Sogo.py (new file, 512 lines)
@@ -0,0 +1,512 @@
import requests
import urllib3
import os


class Sogo:
    def __init__(self, username, password=""):
        self.apiUrl = "/SOGo/so"
        self.davUrl = "/SOGo/dav"
        self.ignore_ssl_errors = True

        self.baseUrl = f"https://{os.getenv('IPv4_NETWORK', '172.22.1')}.247:{os.getenv('HTTPS_PORT', '443')}"
        self.host = os.getenv("MAILCOW_HOSTNAME", "")
        if self.ignore_ssl_errors:
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        self.username = username
        self.password = password

    def addCalendar(self, calendar_name):
        """
        Add a new calendar to the SOGo instance.
        :param calendar_name: Name of the calendar to be created
        :return: Response from the SOGo API.
        """
        res = self.post(f"/{self.username}/Calendar/createFolder", {
            "name": calendar_name
        })
        try:
            return res.json()
        except ValueError:
            return res.text

    def getCalendarIdByName(self, calendar_name):
        """
        Get the calendar ID by its name.
        :param calendar_name: Name of the calendar to find
        :return: Calendar ID if found, otherwise None.
        """
        res = self.get(f"/{self.username}/Calendar/calendarslist")
        try:
            for calendar in res.json()["calendars"]:
                if calendar['name'] == calendar_name:
                    return calendar['id']
        except ValueError:
            return None
        return None

    def getCalendar(self):
        """
        Get the calendar list.
        :return: Response from SOGo API.
        """
        res = self.get(f"/{self.username}/Calendar/calendarslist")
        try:
            return res.json()
        except ValueError:
            return res.text

    def deleteCalendar(self, calendar_id):
        """
        Delete a calendar.
        :param calendar_id: ID of the calendar to be deleted
        :return: True if the calendar was deleted (HTTP 204).
        """
        res = self.get(f"/{self.username}/Calendar/{calendar_id}/delete")
        return res.status_code == 204

    def importCalendar(self, calendar_name, ics_file):
        """
        Import a calendar from an ICS file.
        :param calendar_name: Name of the calendar to import into
        :param ics_file: Path to the ICS file to import
        :return: Response from the SOGo API (parsed JSON if possible).
        """
        try:
            with open(ics_file, "rb"):
                pass
        except Exception as e:
            print(f"Could not open ICS file '{ics_file}': {e}")
            return {"status": "error", "message": str(e)}

        # addCalendar() already returns the parsed JSON, not a Response object
        new_calendar = self.addCalendar(calendar_name)
        selected_calendar = new_calendar["id"]

        url = f"{self.baseUrl}{self.apiUrl}/{self.username}/Calendar/{selected_calendar}/import"
        auth = (self.username, self.password)
        with open(ics_file, "rb") as f:
            files = {'icsFile': (ics_file, f, 'text/calendar')}
            res = requests.post(
                url,
                files=files,
                auth=auth,
                headers={"Host": self.host},  # route via the mailcow hostname, like get()/post()
                verify=not self.ignore_ssl_errors
            )
            try:
                return res.json()
            except ValueError:
                return res.text
    def setCalendarACL(self, calendar_id, sharee_email, acl="r", subscribe=False):
        """
        Set CalDAV calendar permissions for a user (sharee).
        :param calendar_id: ID of the calendar to share
        :param sharee_email: Email of the user to share with
        :param acl: "w" for write, "r" for read-only, or the combination "rw" for read-write
        :param subscribe: True will subscribe the sharee to the calendar
        :return: True on success, otherwise the API error payload or an error string
        """
        # Access rights
        if not acl or any(c not in "rw" for c in acl):
            return "Invalid acl level specified. Use 'w', 'r' or combinations like 'rw'."
        rights = [{
            "c_email": sharee_email,
            "uid": sharee_email,
            "userClass": "normal-user",
            "rights": {
                "Public": "None",
                "Private": "None",
                "Confidential": "None",
                "canCreateObjects": 0,
                "canEraseObjects": 0
            }
        }]
        if "w" in acl:
            rights[0]["rights"]["canCreateObjects"] = 1
            rights[0]["rights"]["canEraseObjects"] = 1
        if "r" in acl:
            rights[0]["rights"]["Public"] = "Viewer"
            rights[0]["rights"]["Private"] = "Viewer"
            rights[0]["rights"]["Confidential"] = "Viewer"

        r_add = self.get(f"/{self.username}/Calendar/{calendar_id}/addUserInAcls?uid={sharee_email}")
        if r_add.status_code < 200 or r_add.status_code > 299:
            try:
                return r_add.json()
            except ValueError:
                return r_add.text

        r_save = self.post(f"/{self.username}/Calendar/{calendar_id}/saveUserRights", rights)
        if r_save.status_code < 200 or r_save.status_code > 299:
            try:
                return r_save.json()
            except ValueError:
                return r_save.text

        if subscribe:
            r_subscribe = self.get(f"/{self.username}/Calendar/{calendar_id}/subscribeUsers?uids={sharee_email}")
            if r_subscribe.status_code < 200 or r_subscribe.status_code > 299:
                try:
                    return r_subscribe.json()
                except ValueError:
                    return r_subscribe.text

        return r_save.status_code == 200

    def getCalendarACL(self, calendar_id):
        """
        Get CalDAV calendar permissions for a user (sharee).
        :param calendar_id: ID of the calendar to get the ACL from
        :return: Response from SOGo API.
        """
        res = self.get(f"/{self.username}/Calendar/{calendar_id}/acls")
        try:
            return res.json()
        except ValueError:
            return res.text

    def deleteCalendarACL(self, calendar_id, sharee_email):
        """
        Delete a calendar ACL for a user (sharee).
        :param calendar_id: ID of the calendar to delete the ACL from
        :param sharee_email: Email of the user whose ACL to delete
        :return: True if the ACL was removed (HTTP 204).
        """
        res = self.get(f"/{self.username}/Calendar/{calendar_id}/removeUserFromAcls?uid={sharee_email}")
        return res.status_code == 204

    def addAddressbook(self, addressbook_name):
        """
        Add a new addressbook to the SOGo instance.
        :param addressbook_name: Name of the addressbook to be created
        :return: Response from the SOGo API.
        """
        res = self.post(f"/{self.username}/Contacts/createFolder", {
            "name": addressbook_name
        })
        try:
            return res.json()
        except ValueError:
            return res.text

    def getAddressbookIdByName(self, addressbook_name):
        """
        Get the addressbook ID by its name.
        :param addressbook_name: Name of the addressbook to find
        :return: Addressbook ID if found, otherwise None.
        """
        res = self.get(f"/{self.username}/Contacts/addressbooksList")
        try:
            for addressbook in res.json()["addressbooks"]:
                if addressbook['name'] == addressbook_name:
                    return addressbook['id']
        except ValueError:
            return None
        return None

    def deleteAddressbook(self, addressbook_id):
        """
        Delete an addressbook.
        :param addressbook_id: ID of the addressbook to be deleted
        :return: True if the addressbook was deleted (HTTP 204).
        """
        res = self.get(f"/{self.username}/Contacts/{addressbook_id}/delete")
        return res.status_code == 204

    def getAddressbookList(self):
        """
        Get the addressbook list.
        :return: Response from SOGo API.
        """
        res = self.get(f"/{self.username}/Contacts/addressbooksList")
        try:
            return res.json()
        except ValueError:
            return res.text
    def setAddressbookACL(self, addressbook_id, sharee_email, acl="r", subscribe=False):
        """
        Set CardDAV addressbook permissions for a user (sharee).
        :param addressbook_id: ID of the addressbook to share
        :param sharee_email: Email of the user to share with
        :param acl: "w" for write, "r" for read-only, or the combination "rw" for read-write
        :param subscribe: True will subscribe the sharee to the addressbook
        :return: True on success, otherwise the API error payload or an error string
        """
        # Access rights
        if not acl or any(c not in "rw" for c in acl):
            print("Invalid acl level specified. Use 'w', 'r' or combinations like 'rw'.")
            return "Invalid acl level specified. Use 'w', 'r' or combinations like 'rw'."
        rights = [{
            "c_email": sharee_email,
            "uid": sharee_email,
            "userClass": "normal-user",
            "rights": {
                "canCreateObjects": 0,
                "canEditObjects": 0,
                "canEraseObjects": 0,
                "canViewObjects": 0,
            }
        }]
        if "w" in acl:
            rights[0]["rights"]["canCreateObjects"] = 1
            rights[0]["rights"]["canEditObjects"] = 1
            rights[0]["rights"]["canEraseObjects"] = 1
        if "r" in acl:
            rights[0]["rights"]["canViewObjects"] = 1

        r_add = self.get(f"/{self.username}/Contacts/{addressbook_id}/addUserInAcls?uid={sharee_email}")
        if r_add.status_code < 200 or r_add.status_code > 299:
            try:
                return r_add.json()
            except ValueError:
                return r_add.text

        r_save = self.post(f"/{self.username}/Contacts/{addressbook_id}/saveUserRights", rights)
        if r_save.status_code < 200 or r_save.status_code > 299:
            try:
                return r_save.json()
            except ValueError:
                return r_save.text

        if subscribe:
            r_subscribe = self.get(f"/{self.username}/Contacts/{addressbook_id}/subscribeUsers?uids={sharee_email}")
            if r_subscribe.status_code < 200 or r_subscribe.status_code > 299:
                try:
                    return r_subscribe.json()
                except ValueError:
                    return r_subscribe.text

        return r_save.status_code == 200

    def getAddressbookACL(self, addressbook_id):
        """
        Get CardDAV addressbook permissions for a user (sharee).
        :param addressbook_id: ID of the addressbook to get the ACL from
        :return: Response from SOGo API.
        """
        res = self.get(f"/{self.username}/Contacts/{addressbook_id}/acls")
        try:
            return res.json()
        except ValueError:
            return res.text

    def deleteAddressbookACL(self, addressbook_id, sharee_email):
        """
        Delete an addressbook ACL for a user (sharee).
        :param addressbook_id: ID of the addressbook to delete the ACL from
        :param sharee_email: Email of the user whose ACL to delete
        :return: True if the ACL was removed (HTTP 204).
        """
        res = self.get(f"/{self.username}/Contacts/{addressbook_id}/removeUserFromAcls?uid={sharee_email}")
        return res.status_code == 204

    def getAddressbookNewGuid(self, addressbook_id):
        """
        Request a new GUID for a SOGo addressbook.
        :param addressbook_id: ID of the addressbook
        :return: JSON response from SOGo, or the raw text if the response is not JSON
        """
        res = self.get(f"/{self.username}/Contacts/{addressbook_id}/newguid")
        try:
            return res.json()
        except ValueError:
            return res.text

    def addAddressbookContact(self, addressbook_id, contact_name, contact_email):
        """
        Save a vCard as a contact in the specified addressbook.
        :param addressbook_id: ID of the addressbook
        :param contact_name: Name of the contact
        :param contact_email: Email of the contact
        :return: JSON response from SOGo or None if not found
        """
        vcard_id = self.getAddressbookNewGuid(addressbook_id)
        contact_data = {
            "id": vcard_id["id"],
            "pid": vcard_id["pid"],
            "c_cn": contact_name,
            "emails": [{
                "type": "pref",
                "value": contact_email
            }],
            "isNew": True,
            "c_component": "vcard",
        }

        endpoint = f"/{self.username}/Contacts/{addressbook_id}/{vcard_id['id']}/saveAsContact"
        res = self.post(endpoint, contact_data)
        try:
            return res.json()
        except ValueError:
            return res.text

    def getAddressbookContacts(self, addressbook_id, contact_email=None):
        """
        Get all contacts from the specified addressbook.
        :param addressbook_id: ID of the addressbook
        :param contact_email: Optional email or display name; if set, only the matching contact is returned
        :return: JSON response with contacts or None if not found
        """
        res = self.get(f"/{self.username}/Contacts/{addressbook_id}/view")
        try:
            res_json = res.json()
            headers = res_json.get("headers", [])
            if not headers or len(headers) < 2:
                return []

            # The first header row holds the field names, the remaining rows are contacts
            field_names = headers[0]
            contacts = []
            for row in headers[1:]:
                contact = dict(zip(field_names, row))
                contacts.append(contact)

            if contact_email:
                contact = {}
                for c in contacts:
                    if c["c_mail"] == contact_email or c["c_cn"] == contact_email:
                        contact = c
                        break
                return contact

            return contacts
        except ValueError:
            return res.text

    def addAddressbookContactList(self, addressbook_id, contact_name, contact_email=None):
        """
        Add a new contact list to the addressbook.
        :param addressbook_id: ID of the addressbook
        :param contact_name: Name of the contact list
        :param contact_email: Comma-separated emails to include in the list
        :return: Response from SOGo API.
        """
        gal_domain = self.username.split("@")[-1]
        vlist_id = self.getAddressbookNewGuid(addressbook_id)
        contact_emails = contact_email.split(",") if contact_email else []
        contacts = self.getAddressbookContacts(addressbook_id)

        refs = []
        for contact in contacts:
            if contact['c_mail'] in contact_emails:
                refs.append({
                    "refs": [],
                    "categories": [],
                    "c_screenname": contact.get("c_screenname", ""),
                    "pid": contact.get("pid", vlist_id["pid"]),
                    "id": contact.get("id", ""),
                    "notes": [""],
                    "empty": " ",
                    "hasphoto": contact.get("hasphoto", 0),
                    "c_cn": contact.get("c_cn", ""),
                    "c_uid": contact.get("c_uid", None),
                    "containername": contact.get("containername", f"GAL {gal_domain}"),  # or your addressbook name
                    "sourceid": contact.get("sourceid", gal_domain),
                    "c_component": contact.get("c_component", "vcard"),
                    "c_sn": contact.get("c_sn", ""),
                    "c_givenname": contact.get("c_givenname", ""),
                    "c_name": contact.get("c_name", contact.get("id", "")),
                    "c_telephonenumber": contact.get("c_telephonenumber", ""),
                    "fn": contact.get("fn", ""),
                    "c_mail": contact.get("c_mail", ""),
                    "emails": contact.get("emails", []),
                    "c_o": contact.get("c_o", ""),
                    "reference": contact.get("id", ""),
                    "birthday": contact.get("birthday", "")
                })

        contact_data = {
            "refs": refs,
            "categories": [],
            "c_screenname": None,
            "pid": vlist_id["pid"],
            "c_component": "vlist",
            "notes": [""],
            "empty": " ",
            "isNew": True,
            "id": vlist_id["id"],
            "c_cn": contact_name,
            "birthday": ""
        }

        endpoint = f"/{self.username}/Contacts/{addressbook_id}/{vlist_id['id']}/saveAsList"
        res = self.post(endpoint, contact_data)
        try:
            return res.json()
        except ValueError:
            return res.text

    def deleteAddressbookItem(self, addressbook_id, contact_name):
        """
        Delete an addressbook item by its name.
        :param addressbook_id: ID of the addressbook containing the item
        :param contact_name: Name of the contact to delete
        :return: True if the item was removed (HTTP 204), otherwise None.
        """
        res = self.getAddressbookContacts(addressbook_id, contact_name)

        if "id" not in res:
            print(f"Contact '{contact_name}' not found in addressbook '{addressbook_id}'.")
            return None
        res = self.post(f"/{self.username}/Contacts/{addressbook_id}/batchDelete", {
            "uids": [res["id"]],
        })
        return res.status_code == 204

    def get(self, endpoint, params=None):
        """
        Make a GET request to the SOGo API.
        :param endpoint: The API endpoint to get.
        :param params: Optional parameters for the GET request.
        :return: Response from the SOGo API.
        """
        url = f"{self.baseUrl}{self.apiUrl}{endpoint}"
        auth = (self.username, self.password)
        headers = {"Host": self.host}

        response = requests.get(
            url,
            params=params,
            auth=auth,
            headers=headers,
            verify=not self.ignore_ssl_errors
        )
        return response

    def post(self, endpoint, data):
        """
        Make a POST request to the SOGo API.
        :param endpoint: The API endpoint to post to.
        :param data: Data to be sent in the POST request.
        :return: Response from the SOGo API.
        """
        url = f"{self.baseUrl}{self.apiUrl}{endpoint}"
        auth = (self.username, self.password)
        headers = {"Host": self.host}

        response = requests.post(
            url,
            json=data,
            auth=auth,
            headers=headers,
            verify=not self.ignore_ssl_errors
        )
        return response
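A hedged usage sketch of the client above; the credentials, calendar name and sharee address are placeholders, and the base URL is derived from the IPv4_NETWORK, HTTPS_PORT and MAILCOW_HOSTNAME environment variables as in __init__():

# Illustrative only: assumes a reachable SOGo instance and valid mailbox credentials.
sogo = Sogo("user@example.com", password="secret")

sogo.addCalendar("Team")                   # returns parsed JSON (or raw text)
cal_id = sogo.getCalendarIdByName("Team")
if cal_id:
    # grant read-write and subscribe the sharee in one step
    sogo.setCalendarACL(cal_id, "colleague@example.com", acl="rw", subscribe=True)
    print(sogo.getCalendarACL(cal_id))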
37
data/Dockerfiles/controller/mailcow-adm/modules/Utils.py
Normal file
@@ -0,0 +1,37 @@
import json
import secrets
import string


class Utils:
    def __init__(self):
        pass

    def normalize_email(self, email):
        replacements = {
            "ä": "ae", "ö": "oe", "ü": "ue", "ß": "ss",
            "Ä": "Ae", "Ö": "Oe", "Ü": "Ue"
        }
        for orig, repl in replacements.items():
            email = email.replace(orig, repl)
        return email

    def generate_password(self, length=8):
        chars = string.ascii_letters + string.digits
        # Use secrets instead of random: password generation needs a CSPRNG
        return ''.join(secrets.choice(chars) for _ in range(length))

    def pprint(self, data=""):
        """
        Pretty print a dictionary, list, or text.
        If data is a string containing JSON, it will be printed in a formatted way.
        """
        if isinstance(data, (dict, list)):
            print(json.dumps(data, indent=2, ensure_ascii=False))
        elif isinstance(data, str):
            try:
                json_data = json.loads(data)
                print(json.dumps(json_data, indent=2, ensure_ascii=False))
            except json.JSONDecodeError:
                print(data)
        else:
            print(data)
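For illustration, the helpers above behave roughly like this (the outputs are examples, and generate_password() output is random):

utils = Utils()
print(utils.normalize_email("jörg.müßig@example.com"))  # joerg.muessig@example.com
print(utils.generate_password(12))                      # e.g. 'aZ3k9QpL0xWn'
utils.pprint('{"a": 1}')                                # detects JSON in a string and pretty-prints it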
4
data/Dockerfiles/controller/mailcow-adm/requirements.txt
Normal file
@@ -0,0 +1,4 @@
jinja2
requests
mysql-connector-python
pytest
@@ -0,0 +1,94 @@
import pytest
import json
import sys
import os

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from models.DomainModel import DomainModel
from models.AliasModel import AliasModel


def test_model():
    # Generate a random alias
    random_alias = f"alias_test{os.urandom(4).hex()}@mailcow.local"

    # Create an instance of AliasModel
    model = AliasModel(
        address=random_alias,
        goto="test@mailcow.local,test2@mailcow.local"
    )

    # Test the parser_command attribute
    assert model.parser_command == "alias", "Parser command should be 'alias'"

    # Add a domain for testing
    domain_model = DomainModel(domain="mailcow.local")
    domain_model.add()

    # 1. Alias add tests, should succeed
    r_add = model.add()
    assert isinstance(r_add, list), f"Expected an array but received: {json.dumps(r_add, indent=2)}"
    assert len(r_add) > 0, f"Wrong array received: {json.dumps(r_add, indent=2)}"
    assert "type" in r_add[0], f"'type' key missing in response: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['type'] == "success", f"Wrong 'type' received: {r_add[0]['type']}\n{json.dumps(r_add, indent=2)}"
    assert "msg" in r_add[0], f"'msg' key missing in response: {json.dumps(r_add, indent=2)}"
    assert isinstance(r_add[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_add, indent=2)}"
    assert 0 < len(r_add[0]['msg']) <= 3, f"Wrong 'msg' array received: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['msg'][0] == "alias_added", f"Wrong 'msg' received: {r_add[0]['msg'][0]}, expected: 'alias_added'\n{json.dumps(r_add, indent=2)}"

    # Assign the created alias ID for further tests
    model.id = r_add[0]['msg'][2]

    # 2. Alias add tests, should fail because the alias already exists
    r_add = model.add()
    assert isinstance(r_add, list), f"Expected an array but received: {json.dumps(r_add, indent=2)}"
    assert len(r_add) > 0, f"Wrong array received: {json.dumps(r_add, indent=2)}"
    assert "type" in r_add[0], f"'type' key missing in response: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['type'] == "danger", f"Wrong 'type' received: {r_add[0]['type']}\n{json.dumps(r_add, indent=2)}"
    assert "msg" in r_add[0], f"'msg' key missing in response: {json.dumps(r_add, indent=2)}"
    assert isinstance(r_add[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_add, indent=2)}"
    assert 0 < len(r_add[0]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['msg'][0] == "is_alias_or_mailbox", f"Wrong 'msg' received: {r_add[0]['msg'][0]}, expected: 'is_alias_or_mailbox'\n{json.dumps(r_add, indent=2)}"

    # 3. Alias get tests
    r_get = model.get()
    assert isinstance(r_get, dict), f"Expected a dict but received: {json.dumps(r_get, indent=2)}"
    assert "domain" in r_get, f"'domain' key missing in response: {json.dumps(r_get, indent=2)}"
    assert "goto" in r_get, f"'goto' key missing in response: {json.dumps(r_get, indent=2)}"
    assert "address" in r_get, f"'address' key missing in response: {json.dumps(r_get, indent=2)}"
    assert r_get['domain'] == model.address.split("@")[1], f"Wrong 'domain' received: {r_get['domain']}, expected: {model.address.split('@')[1]}\n{json.dumps(r_get, indent=2)}"
    assert r_get['goto'] == model.goto, f"Wrong 'goto' received: {r_get['goto']}, expected: {model.goto}\n{json.dumps(r_get, indent=2)}"
    assert r_get['address'] == model.address, f"Wrong 'address' received: {r_get['address']}, expected: {model.address}\n{json.dumps(r_get, indent=2)}"

    # 4. Alias edit tests
    model.goto = "test@mailcow.local"
    model.active = 0
    r_edit = model.edit()
    assert isinstance(r_edit, list), f"Expected an array but received: {json.dumps(r_edit, indent=2)}"
    assert len(r_edit) > 0, f"Wrong array received: {json.dumps(r_edit, indent=2)}"
    assert "type" in r_edit[0], f"'type' key missing in response: {json.dumps(r_edit, indent=2)}"
    assert r_edit[0]['type'] == "success", f"Wrong 'type' received: {r_edit[0]['type']}\n{json.dumps(r_edit, indent=2)}"
    assert "msg" in r_edit[0], f"'msg' key missing in response: {json.dumps(r_edit, indent=2)}"
    assert isinstance(r_edit[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_edit, indent=2)}"
    assert 0 < len(r_edit[0]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_edit, indent=2)}"
    assert r_edit[0]['msg'][0] == "alias_modified", f"Wrong 'msg' received: {r_edit[0]['msg'][0]}, expected: 'alias_modified'\n{json.dumps(r_edit, indent=2)}"

    # 5. Alias delete tests
    r_delete = model.delete()
    assert isinstance(r_delete, list), f"Expected an array but received: {json.dumps(r_delete, indent=2)}"
    assert len(r_delete) > 0, f"Wrong array received: {json.dumps(r_delete, indent=2)}"
    assert "type" in r_delete[0], f"'type' key missing in response: {json.dumps(r_delete, indent=2)}"
    assert r_delete[0]['type'] == "success", f"Wrong 'type' received: {r_delete[0]['type']}\n{json.dumps(r_delete, indent=2)}"
    assert "msg" in r_delete[0], f"'msg' key missing in response: {json.dumps(r_delete, indent=2)}"
    assert isinstance(r_delete[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_delete, indent=2)}"
    assert 0 < len(r_delete[0]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_delete, indent=2)}"
    assert r_delete[0]['msg'][0] == "alias_removed", f"Wrong 'msg' received: {r_delete[0]['msg'][0]}, expected: 'alias_removed'\n{json.dumps(r_delete, indent=2)}"

    # Delete the testing domain
    domain_model.delete()


if __name__ == "__main__":
    print("Running AliasModel tests...")
    test_model()
    print("All tests passed!")
@@ -0,0 +1,71 @@
import pytest
from models.BaseModel import BaseModel


class Args:
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)


def test_has_required_args():
    BaseModel.required_args = {
        "test_object": [["arg1"], ["arg2", "arg3"]],
    }

    # Test cases with an Args object
    args = Args(object="non_existent_object")
    assert not BaseModel.has_required_args(args)

    args = Args(object="test_object")
    assert not BaseModel.has_required_args(args)

    args = Args(object="test_object", arg1="value")
    assert BaseModel.has_required_args(args)

    args = Args(object="test_object", arg2="value")
    assert not BaseModel.has_required_args(args)

    args = Args(object="test_object", arg3="value")
    assert not BaseModel.has_required_args(args)

    args = Args(object="test_object", arg2="value", arg3="value")
    assert BaseModel.has_required_args(args)

    # Test cases with a dict object
    args = {"object": "non_existent_object"}
    assert not BaseModel.has_required_args(args)

    args = {"object": "test_object"}
    assert not BaseModel.has_required_args(args)

    args = {"object": "test_object", "arg1": "value"}
    assert BaseModel.has_required_args(args)

    args = {"object": "test_object", "arg2": "value"}
    assert not BaseModel.has_required_args(args)

    args = {"object": "test_object", "arg3": "value"}
    assert not BaseModel.has_required_args(args)

    args = {"object": "test_object", "arg2": "value", "arg3": "value"}
    assert BaseModel.has_required_args(args)


    BaseModel.required_args = {
        "test_object": [[]],
    }

    # Test cases with an Args object
    args = Args(object="non_existent_object")
    assert not BaseModel.has_required_args(args)

    args = Args(object="test_object")
    assert BaseModel.has_required_args(args)

    # Test cases with a dict object
    args = {"object": "non_existent_object"}
    assert not BaseModel.has_required_args(args)

    args = {"object": "test_object"}
    assert BaseModel.has_required_args(args)
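The required_args structure reads as a list of alternative argument groups: the check passes when at least one group has all of its arguments present, and an empty group matches anything. BaseModel itself is not part of this hunk, so the following reference sketch is an inference from the assertions above, not the actual implementation:

def has_required_args(args, required_args):
    # Accept both attribute-style objects and plain dicts.
    get = args.get if isinstance(args, dict) else lambda key: getattr(args, key, None)
    groups = required_args.get(get("object"))
    if groups is None:
        return False  # unknown object type
    # At least one group of required arguments must be fully present.
    return any(all(get(arg) is not None for arg in group) for group in groups)

assert has_required_args({"object": "test_object", "arg2": "v", "arg3": "v"},
                         {"test_object": [["arg1"], ["arg2", "arg3"]]})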
@@ -0,0 +1,74 @@
import pytest
import json
import sys
import os

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from models.DomainModel import DomainModel


def test_model():
    # Create an instance of DomainModel
    model = DomainModel(
        domain="mailcow.local",
    )

    # Test the parser_command attribute
    assert model.parser_command == "domain", "Parser command should be 'domain'"

    # 1. Domain add tests, should succeed
    r_add = model.add()
    assert isinstance(r_add, list), f"Expected an array but received: {json.dumps(r_add, indent=2)}"
    assert len(r_add) >= 2, f"Wrong array received: {json.dumps(r_add, indent=2)}"
    assert "type" in r_add[1], f"'type' key missing in response: {json.dumps(r_add, indent=2)}"
    assert r_add[1]['type'] == "success", f"Wrong 'type' received: {r_add[1]['type']}\n{json.dumps(r_add, indent=2)}"
    assert "msg" in r_add[1], f"'msg' key missing in response: {json.dumps(r_add, indent=2)}"
    assert isinstance(r_add[1]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_add, indent=2)}"
    assert 0 < len(r_add[1]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_add, indent=2)}"
    assert r_add[1]['msg'][0] == "domain_added", f"Wrong 'msg' received: {r_add[1]['msg'][0]}, expected: 'domain_added'\n{json.dumps(r_add, indent=2)}"

    # 2. Domain add tests, should fail because the domain already exists
    r_add = model.add()
    assert isinstance(r_add, list), f"Expected an array but received: {json.dumps(r_add, indent=2)}"
    assert len(r_add) > 0, f"Wrong array received: {json.dumps(r_add, indent=2)}"
    assert "type" in r_add[0], f"'type' key missing in response: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['type'] == "danger", f"Wrong 'type' received: {r_add[0]['type']}\n{json.dumps(r_add, indent=2)}"
    assert "msg" in r_add[0], f"'msg' key missing in response: {json.dumps(r_add, indent=2)}"
    assert isinstance(r_add[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_add, indent=2)}"
    assert 0 < len(r_add[0]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['msg'][0] == "domain_exists", f"Wrong 'msg' received: {r_add[0]['msg'][0]}, expected: 'domain_exists'\n{json.dumps(r_add, indent=2)}"

    # 3. Domain get tests
    r_get = model.get()
    assert isinstance(r_get, dict), f"Expected a dict but received: {json.dumps(r_get, indent=2)}"
    assert "domain_name" in r_get, f"'domain_name' key missing in response: {json.dumps(r_get, indent=2)}"
    assert r_get['domain_name'] == model.domain, f"Wrong 'domain_name' received: {r_get['domain_name']}, expected: {model.domain}\n{json.dumps(r_get, indent=2)}"

    # 4. Domain edit tests
    model.active = 0
    r_edit = model.edit()
    assert isinstance(r_edit, list), f"Expected an array but received: {json.dumps(r_edit, indent=2)}"
    assert len(r_edit) > 0, f"Wrong array received: {json.dumps(r_edit, indent=2)}"
    assert "type" in r_edit[0], f"'type' key missing in response: {json.dumps(r_edit, indent=2)}"
    assert r_edit[0]['type'] == "success", f"Wrong 'type' received: {r_edit[0]['type']}\n{json.dumps(r_edit, indent=2)}"
    assert "msg" in r_edit[0], f"'msg' key missing in response: {json.dumps(r_edit, indent=2)}"
    assert isinstance(r_edit[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_edit, indent=2)}"
    assert 0 < len(r_edit[0]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_edit, indent=2)}"
    assert r_edit[0]['msg'][0] == "domain_modified", f"Wrong 'msg' received: {r_edit[0]['msg'][0]}, expected: 'domain_modified'\n{json.dumps(r_edit, indent=2)}"

    # 5. Domain delete tests
    r_delete = model.delete()
    assert isinstance(r_delete, list), f"Expected an array but received: {json.dumps(r_delete, indent=2)}"
    assert len(r_delete) > 0, f"Wrong array received: {json.dumps(r_delete, indent=2)}"
    assert "type" in r_delete[0], f"'type' key missing in response: {json.dumps(r_delete, indent=2)}"
    assert r_delete[0]['type'] == "success", f"Wrong 'type' received: {r_delete[0]['type']}\n{json.dumps(r_delete, indent=2)}"
    assert "msg" in r_delete[0], f"'msg' key missing in response: {json.dumps(r_delete, indent=2)}"
    assert isinstance(r_delete[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_delete, indent=2)}"
    assert 0 < len(r_delete[0]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_delete, indent=2)}"
    assert r_delete[0]['msg'][0] == "domain_removed", f"Wrong 'msg' received: {r_delete[0]['msg'][0]}, expected: 'domain_removed'\n{json.dumps(r_delete, indent=2)}"


if __name__ == "__main__":
    print("Running DomainModel tests...")
    test_model()
    print("All tests passed!")
@@ -0,0 +1,89 @@
import pytest
import json
import sys
import os

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from models.DomainModel import DomainModel
from models.DomainadminModel import DomainadminModel


def test_model():
    # Generate a random domainadmin
    random_username = f"dadmin_test{os.urandom(4).hex()}"
    random_password = os.urandom(4).hex()

    # Create an instance of DomainadminModel
    model = DomainadminModel(
        username=random_username,
        password=random_password,
        domains="mailcow.local",
    )

    # Test the parser_command attribute
    assert model.parser_command == "domainadmin", "Parser command should be 'domainadmin'"

    # Add a domain for testing
    domain_model = DomainModel(domain="mailcow.local")
    domain_model.add()

    # 1. Domainadmin add tests, should succeed
    r_add = model.add()
    assert isinstance(r_add, list), f"Expected an array but received: {json.dumps(r_add, indent=2)}"
    assert len(r_add) > 0, f"Wrong array received: {json.dumps(r_add, indent=2)}"
    assert "type" in r_add[0], f"'type' key missing in response: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['type'] == "success", f"Wrong 'type' received: {r_add[0]['type']}\n{json.dumps(r_add, indent=2)}"
    assert "msg" in r_add[0], f"'msg' key missing in response: {json.dumps(r_add, indent=2)}"
    assert isinstance(r_add[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_add, indent=2)}"
    assert 0 < len(r_add[0]['msg']) <= 3, f"Wrong 'msg' array received: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['msg'][0] == "domain_admin_added", f"Wrong 'msg' received: {r_add[0]['msg'][0]}, expected: 'domain_admin_added'\n{json.dumps(r_add, indent=2)}"

    # 2. Domainadmin add tests, should fail because the domainadmin already exists
    r_add = model.add()
    assert isinstance(r_add, list), f"Expected an array but received: {json.dumps(r_add, indent=2)}"
    assert len(r_add) > 0, f"Wrong array received: {json.dumps(r_add, indent=2)}"
    assert "type" in r_add[0], f"'type' key missing in response: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['type'] == "danger", f"Wrong 'type' received: {r_add[0]['type']}\n{json.dumps(r_add, indent=2)}"
    assert "msg" in r_add[0], f"'msg' key missing in response: {json.dumps(r_add, indent=2)}"
    assert isinstance(r_add[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_add, indent=2)}"
    assert 0 < len(r_add[0]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['msg'][0] == "object_exists", f"Wrong 'msg' received: {r_add[0]['msg'][0]}, expected: 'object_exists'\n{json.dumps(r_add, indent=2)}"

    # 3. Domainadmin get tests
    r_get = model.get()
    assert isinstance(r_get, dict), f"Expected a dict but received: {json.dumps(r_get, indent=2)}"
    assert "selected_domains" in r_get, f"'selected_domains' key missing in response: {json.dumps(r_get, indent=2)}"
    assert "username" in r_get, f"'username' key missing in response: {json.dumps(r_get, indent=2)}"
    assert set(model.domains.replace(" ", "").split(",")) == set(r_get['selected_domains']), f"Wrong 'selected_domains' received: {r_get['selected_domains']}, expected: {model.domains}\n{json.dumps(r_get, indent=2)}"
    assert r_get['username'] == model.username, f"Wrong 'username' received: {r_get['username']}, expected: {model.username}\n{json.dumps(r_get, indent=2)}"

    # 4. Domainadmin edit tests
    model.active = 0
    r_edit = model.edit()
    assert isinstance(r_edit, list), f"Expected an array but received: {json.dumps(r_edit, indent=2)}"
    assert len(r_edit) > 0, f"Wrong array received: {json.dumps(r_edit, indent=2)}"
    assert "type" in r_edit[0], f"'type' key missing in response: {json.dumps(r_edit, indent=2)}"
    assert r_edit[0]['type'] == "success", f"Wrong 'type' received: {r_edit[0]['type']}\n{json.dumps(r_edit, indent=2)}"
    assert "msg" in r_edit[0], f"'msg' key missing in response: {json.dumps(r_edit, indent=2)}"
    assert isinstance(r_edit[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_edit, indent=2)}"
    assert 0 < len(r_edit[0]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_edit, indent=2)}"
    assert r_edit[0]['msg'][0] == "domain_admin_modified", f"Wrong 'msg' received: {r_edit[0]['msg'][0]}, expected: 'domain_admin_modified'\n{json.dumps(r_edit, indent=2)}"

    # 5. Domainadmin delete tests
    r_delete = model.delete()
    assert isinstance(r_delete, list), f"Expected an array but received: {json.dumps(r_delete, indent=2)}"
    assert len(r_delete) > 0, f"Wrong array received: {json.dumps(r_delete, indent=2)}"
    assert "type" in r_delete[0], f"'type' key missing in response: {json.dumps(r_delete, indent=2)}"
    assert r_delete[0]['type'] == "success", f"Wrong 'type' received: {r_delete[0]['type']}\n{json.dumps(r_delete, indent=2)}"
    assert "msg" in r_delete[0], f"'msg' key missing in response: {json.dumps(r_delete, indent=2)}"
    assert isinstance(r_delete[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_delete, indent=2)}"
    assert 0 < len(r_delete[0]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_delete, indent=2)}"
    assert r_delete[0]['msg'][0] == "domain_admin_removed", f"Wrong 'msg' received: {r_delete[0]['msg'][0]}, expected: 'domain_admin_removed'\n{json.dumps(r_delete, indent=2)}"

    # Delete the testing domain
    domain_model.delete()


if __name__ == "__main__":
    print("Running DomainadminModel tests...")
    test_model()
    print("All tests passed!")
@@ -0,0 +1,89 @@
import pytest
import json
import sys
import os

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from models.DomainModel import DomainModel
from models.MailboxModel import MailboxModel


def test_model():
    # Generate a random mailbox
    random_username = f"mbox_test{os.urandom(4).hex()}@mailcow.local"
    random_password = os.urandom(4).hex()

    # Create an instance of MailboxModel
    model = MailboxModel(
        username=random_username,
        password=random_password
    )

    # Test the parser_command attribute
    assert model.parser_command == "mailbox", "Parser command should be 'mailbox'"

    # Add a domain for testing
    domain_model = DomainModel(domain="mailcow.local")
    domain_model.add()

    # 1. Mailbox add tests, should succeed
    r_add = model.add()
    assert isinstance(r_add, list), f"Expected an array but received: {json.dumps(r_add, indent=2)}"
    assert len(r_add) == 2, f"Wrong array received: {json.dumps(r_add, indent=2)}"
    assert "type" in r_add[1], f"'type' key missing in response: {json.dumps(r_add, indent=2)}"
    assert r_add[1]['type'] == "success", f"Wrong 'type' received: {r_add[1]['type']}\n{json.dumps(r_add, indent=2)}"
    assert "msg" in r_add[1], f"'msg' key missing in response: {json.dumps(r_add, indent=2)}"
    assert isinstance(r_add[1]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_add, indent=2)}"
    assert 0 < len(r_add[1]['msg']) <= 3, f"Wrong 'msg' array received: {json.dumps(r_add, indent=2)}"
    assert r_add[1]['msg'][0] == "mailbox_added", f"Wrong 'msg' received: {r_add[1]['msg'][0]}, expected: 'mailbox_added'\n{json.dumps(r_add, indent=2)}"

    # 2. Mailbox add tests, should fail because the mailbox already exists
    r_add = model.add()
    assert isinstance(r_add, list), f"Expected an array but received: {json.dumps(r_add, indent=2)}"
    assert len(r_add) > 0, f"Wrong array received: {json.dumps(r_add, indent=2)}"
    assert "type" in r_add[0], f"'type' key missing in response: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['type'] == "danger", f"Wrong 'type' received: {r_add[0]['type']}\n{json.dumps(r_add, indent=2)}"
    assert "msg" in r_add[0], f"'msg' key missing in response: {json.dumps(r_add, indent=2)}"
    assert isinstance(r_add[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_add, indent=2)}"
    assert 0 < len(r_add[0]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['msg'][0] == "object_exists", f"Wrong 'msg' received: {r_add[0]['msg'][0]}, expected: 'object_exists'\n{json.dumps(r_add, indent=2)}"

    # 3. Mailbox get tests
    r_get = model.get()
    assert isinstance(r_get, dict), f"Expected a dict but received: {json.dumps(r_get, indent=2)}"
    assert "domain" in r_get, f"'domain' key missing in response: {json.dumps(r_get, indent=2)}"
    assert "local_part" in r_get, f"'local_part' key missing in response: {json.dumps(r_get, indent=2)}"
    assert r_get['domain'] == model.domain, f"Wrong 'domain' received: {r_get['domain']}, expected: {model.domain}\n{json.dumps(r_get, indent=2)}"
    assert r_get['local_part'] == model.local_part, f"Wrong 'local_part' received: {r_get['local_part']}, expected: {model.local_part}\n{json.dumps(r_get, indent=2)}"

    # 4. Mailbox edit tests
    model.active = 0
    r_edit = model.edit()
    assert isinstance(r_edit, list), f"Expected an array but received: {json.dumps(r_edit, indent=2)}"
    assert len(r_edit) > 0, f"Wrong array received: {json.dumps(r_edit, indent=2)}"
    assert "type" in r_edit[0], f"'type' key missing in response: {json.dumps(r_edit, indent=2)}"
    assert r_edit[0]['type'] == "success", f"Wrong 'type' received: {r_edit[0]['type']}\n{json.dumps(r_edit, indent=2)}"
    assert "msg" in r_edit[0], f"'msg' key missing in response: {json.dumps(r_edit, indent=2)}"
    assert isinstance(r_edit[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_edit, indent=2)}"
    assert 0 < len(r_edit[0]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_edit, indent=2)}"
    assert r_edit[0]['msg'][0] == "mailbox_modified", f"Wrong 'msg' received: {r_edit[0]['msg'][0]}, expected: 'mailbox_modified'\n{json.dumps(r_edit, indent=2)}"

    # 5. Mailbox delete tests
    r_delete = model.delete()
    assert isinstance(r_delete, list), f"Expected an array but received: {json.dumps(r_delete, indent=2)}"
    assert len(r_delete) > 0, f"Wrong array received: {json.dumps(r_delete, indent=2)}"
    assert "type" in r_delete[0], f"'type' key missing in response: {json.dumps(r_delete, indent=2)}"
    assert r_delete[0]['type'] == "success", f"Wrong 'type' received: {r_delete[0]['type']}\n{json.dumps(r_delete, indent=2)}"
    assert "msg" in r_delete[0], f"'msg' key missing in response: {json.dumps(r_delete, indent=2)}"
    assert isinstance(r_delete[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_delete, indent=2)}"
    assert 0 < len(r_delete[0]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_delete, indent=2)}"
    assert r_delete[0]['msg'][0] == "mailbox_removed", f"Wrong 'msg' received: {r_delete[0]['msg'][0]}, expected: 'mailbox_removed'\n{json.dumps(r_delete, indent=2)}"

    # Delete the testing domain
    domain_model.delete()


if __name__ == "__main__":
    print("Running MailboxModel tests...")
    test_model()
    print("All tests passed!")
@@ -0,0 +1,39 @@
import pytest
import json
import sys
import os

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from models.StatusModel import StatusModel


def test_model():
    # Create an instance of StatusModel
    model = StatusModel()

    # Test the parser_command attribute
    assert model.parser_command == "status", "Parser command should be 'status'"

    # 1. Status version tests
    r_version = model.version()
    assert isinstance(r_version, dict), f"Expected a dict but received: {json.dumps(r_version, indent=2)}"
    assert "version" in r_version, f"'version' key missing in response: {json.dumps(r_version, indent=2)}"

    # 2. Status vmail tests
    r_vmail = model.vmail()
    assert isinstance(r_vmail, dict), f"Expected a dict but received: {json.dumps(r_vmail, indent=2)}"
    assert "type" in r_vmail, f"'type' key missing in response: {json.dumps(r_vmail, indent=2)}"
    assert "disk" in r_vmail, f"'disk' key missing in response: {json.dumps(r_vmail, indent=2)}"
    assert "used" in r_vmail, f"'used' key missing in response: {json.dumps(r_vmail, indent=2)}"
    assert "total" in r_vmail, f"'total' key missing in response: {json.dumps(r_vmail, indent=2)}"
    assert "used_percent" in r_vmail, f"'used_percent' key missing in response: {json.dumps(r_vmail, indent=2)}"

    # 3. Status containers tests
    r_containers = model.containers()
    assert isinstance(r_containers, dict), f"Expected a dict but received: {json.dumps(r_containers, indent=2)}"


if __name__ == "__main__":
    print("Running StatusModel tests...")
    test_model()
    print("All tests passed!")
@@ -0,0 +1,106 @@
import pytest
import json
import sys
import os

sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "../")))
from models.DomainModel import DomainModel
from models.MailboxModel import MailboxModel
from models.SyncjobModel import SyncjobModel


def test_model():
    # Generate a random mailbox
    random_username = f"mbox_test{os.urandom(4).hex()}@mailcow.local"
    random_password = os.urandom(4).hex()

    # Create an instance of SyncjobModel
    model = SyncjobModel(
        username=random_username,
        host1="mailcow.local",
        port1=993,
        user1="testuser@mailcow.local",
        password1="testpassword",
        enc1="SSL",
    )

    # Test the parser_command attribute
    assert model.parser_command == "syncjob", "Parser command should be 'syncjob'"

    # Add a domain and mailbox for testing
    domain_model = DomainModel(domain="mailcow.local")
    domain_model.add()
    mbox_model = MailboxModel(username=random_username, password=random_password)
    mbox_model.add()

    # 1. Syncjob add tests, should succeed
    r_add = model.add()
    assert isinstance(r_add, list), f"Expected an array but received: {json.dumps(r_add, indent=2)}"
    assert 0 < len(r_add) <= 2, f"Wrong array received: {json.dumps(r_add, indent=2)}"
    assert "type" in r_add[0], f"'type' key missing in response: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['type'] == "success", f"Wrong 'type' received: {r_add[0]['type']}\n{json.dumps(r_add, indent=2)}"
    assert "msg" in r_add[0], f"'msg' key missing in response: {json.dumps(r_add, indent=2)}"
    assert isinstance(r_add[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_add, indent=2)}"
    assert 0 < len(r_add[0]['msg']) <= 3, f"Wrong 'msg' array received: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['msg'][0] == "mailbox_modified", f"Wrong 'msg' received: {r_add[0]['msg'][0]}, expected: 'mailbox_modified'\n{json.dumps(r_add, indent=2)}"

    # Assign the created syncjob ID for further tests
    model.id = r_add[0]['msg'][2]

    # 2. Syncjob add tests, should fail because the syncjob already exists
    r_add = model.add()
    assert isinstance(r_add, list), f"Expected an array but received: {json.dumps(r_add, indent=2)}"
    assert len(r_add) > 0, f"Wrong array received: {json.dumps(r_add, indent=2)}"
    assert "type" in r_add[0], f"'type' key missing in response: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['type'] == "danger", f"Wrong 'type' received: {r_add[0]['type']}\n{json.dumps(r_add, indent=2)}"
    assert "msg" in r_add[0], f"'msg' key missing in response: {json.dumps(r_add, indent=2)}"
    assert isinstance(r_add[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_add, indent=2)}"
    assert 0 < len(r_add[0]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_add, indent=2)}"
    assert r_add[0]['msg'][0] == "object_exists", f"Wrong 'msg' received: {r_add[0]['msg'][0]}, expected: 'object_exists'\n{json.dumps(r_add, indent=2)}"

    # 3. Syncjob get tests
    r_get = model.get()
    assert isinstance(r_get, list), f"Expected a list but received: {json.dumps(r_get, indent=2)}"
    assert len(r_get) > 0, f"Wrong list received: {json.dumps(r_get, indent=2)}"
    assert "user2" in r_get[0], f"'user2' key missing in response: {json.dumps(r_get, indent=2)}"
    assert "host1" in r_get[0], f"'host1' key missing in response: {json.dumps(r_get, indent=2)}"
    assert "port1" in r_get[0], f"'port1' key missing in response: {json.dumps(r_get, indent=2)}"
    assert "user1" in r_get[0], f"'user1' key missing in response: {json.dumps(r_get, indent=2)}"
    assert "enc1" in r_get[0], f"'enc1' key missing in response: {json.dumps(r_get, indent=2)}"
    assert r_get[0]['user2'] == model.username, f"Wrong 'user2' received: {r_get[0]['user2']}, expected: {model.username}\n{json.dumps(r_get, indent=2)}"
    assert r_get[0]['host1'] == model.host1, f"Wrong 'host1' received: {r_get[0]['host1']}, expected: {model.host1}\n{json.dumps(r_get, indent=2)}"
    assert r_get[0]['port1'] == model.port1, f"Wrong 'port1' received: {r_get[0]['port1']}, expected: {model.port1}\n{json.dumps(r_get, indent=2)}"
    assert r_get[0]['user1'] == model.user1, f"Wrong 'user1' received: {r_get[0]['user1']}, expected: {model.user1}\n{json.dumps(r_get, indent=2)}"
    assert r_get[0]['enc1'] == model.enc1, f"Wrong 'enc1' received: {r_get[0]['enc1']}, expected: {model.enc1}\n{json.dumps(r_get, indent=2)}"

    # 4. Syncjob edit tests
    model.active = 1
    r_edit = model.edit()
    assert isinstance(r_edit, list), f"Expected an array but received: {json.dumps(r_edit, indent=2)}"
    assert len(r_edit) > 0, f"Wrong array received: {json.dumps(r_edit, indent=2)}"
    assert "type" in r_edit[0], f"'type' key missing in response: {json.dumps(r_edit, indent=2)}"
    assert r_edit[0]['type'] == "success", f"Wrong 'type' received: {r_edit[0]['type']}\n{json.dumps(r_edit, indent=2)}"
    assert "msg" in r_edit[0], f"'msg' key missing in response: {json.dumps(r_edit, indent=2)}"
    assert isinstance(r_edit[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_edit, indent=2)}"
    assert 0 < len(r_edit[0]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_edit, indent=2)}"
    assert r_edit[0]['msg'][0] == "mailbox_modified", f"Wrong 'msg' received: {r_edit[0]['msg'][0]}, expected: 'mailbox_modified'\n{json.dumps(r_edit, indent=2)}"

    # 5. Syncjob delete tests
    r_delete = model.delete()
    assert isinstance(r_delete, list), f"Expected an array but received: {json.dumps(r_delete, indent=2)}"
    assert len(r_delete) > 0, f"Wrong array received: {json.dumps(r_delete, indent=2)}"
    assert "type" in r_delete[0], f"'type' key missing in response: {json.dumps(r_delete, indent=2)}"
    assert r_delete[0]['type'] == "success", f"Wrong 'type' received: {r_delete[0]['type']}\n{json.dumps(r_delete, indent=2)}"
    assert "msg" in r_delete[0], f"'msg' key missing in response: {json.dumps(r_delete, indent=2)}"
    assert isinstance(r_delete[0]['msg'], list), f"Expected a 'msg' array but received: {json.dumps(r_delete, indent=2)}"
    assert 0 < len(r_delete[0]['msg']) <= 2, f"Wrong 'msg' array received: {json.dumps(r_delete, indent=2)}"
    assert r_delete[0]['msg'][0] == "deleted_syncjob", f"Wrong 'msg' received: {r_delete[0]['msg'][0]}, expected: 'deleted_syncjob'\n{json.dumps(r_delete, indent=2)}"

    # Delete the testing domain and mailbox
    mbox_model.delete()
    domain_model.delete()


if __name__ == "__main__":
    print("Running SyncjobModel tests...")
    test_model()
    print("All tests passed!")
17
data/Dockerfiles/controller/supervisord.conf
Normal file
@@ -0,0 +1,17 @@
[supervisord]
nodaemon=true
user=root
pidfile=/var/run/supervisord.pid

[program:api]
command=python /app/api/main.py
autostart=true
autorestart=true
stdout_logfile=/dev/stdout
stderr_logfile=/dev/stderr
stdout_logfile_maxbytes=0
stderr_logfile_maxbytes=0

[eventlistener:processes]
command=/usr/local/sbin/stop-supervisor.sh
events=PROCESS_STATE_STOPPED, PROCESS_STATE_EXITED, PROCESS_STATE_FATAL
@@ -1,9 +0,0 @@
#!/bin/bash

`openssl req -x509 -newkey rsa:4096 -sha256 -days 3650 -nodes \
  -keyout /app/dockerapi_key.pem \
  -out /app/dockerapi_cert.pem \
  -subj /CN=dockerapi/O=mailcow \
  -addext subjectAltName=DNS:dockerapi`

exec "$@"
@@ -3,7 +3,7 @@ FROM alpine:3.21
LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"

# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=^(?<version>.*)$
ARG GOSU_VERSION=1.16
ARG GOSU_VERSION=1.19

ENV LANG=C.UTF-8
ENV LC_ALL=C.UTF-8
@@ -87,11 +87,11 @@ RUN addgroup -g 5000 vmail \
  perl-proc-processtable \
  perl-app-cpanminus \
  procps \
  python3 py3-pip python3-dev \
  python3 \
  py3-mysqlclient \
  py3-html2text \
  linux-headers \
  musl-dev \
  gcc \
  py3-jinja2 \
  py3-redis \
  redis \
  syslog-ng \
  syslog-ng-redis \
@@ -115,36 +115,25 @@ RUN addgroup -g 5000 vmail \
  && chmod +x /usr/local/bin/gosu \
  && gosu nobody true

RUN pip install --break-system-packages \
  mysql-connector-python \
  jinja2 \
  redis \
  dnspython \
  psutil


COPY data/Dockerfiles/bootstrap /bootstrap
COPY data/Dockerfiles/dovecot/trim_logs.sh /usr/local/bin/trim_logs.sh
COPY data/Dockerfiles/dovecot/clean_q_aged.sh /usr/local/bin/clean_q_aged.sh
COPY data/Dockerfiles/dovecot/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
COPY data/Dockerfiles/dovecot/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
COPY data/Dockerfiles/dovecot/imapsync /usr/local/bin/imapsync
COPY data/Dockerfiles/dovecot/imapsync_runner.pl /usr/local/bin/imapsync_runner.pl
COPY data/Dockerfiles/dovecot/report-spam.sieve /usr/lib/dovecot/sieve/report-spam.sieve
COPY data/Dockerfiles/dovecot/report-ham.sieve /usr/lib/dovecot/sieve/report-ham.sieve
COPY data/Dockerfiles/dovecot/rspamd-pipe-ham /usr/lib/dovecot/sieve/rspamd-pipe-ham
COPY data/Dockerfiles/dovecot/rspamd-pipe-spam /usr/lib/dovecot/sieve/rspamd-pipe-spam
COPY data/Dockerfiles/dovecot/sa-rules.sh /usr/local/bin/sa-rules.sh
COPY data/Dockerfiles/dovecot/docker-entrypoint.sh /
COPY data/Dockerfiles/dovecot/supervisord.conf /etc/supervisor/supervisord.conf
COPY data/Dockerfiles/dovecot/stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
COPY data/Dockerfiles/dovecot/quarantine_notify.py /usr/local/bin/quarantine_notify.py
COPY data/Dockerfiles/dovecot/quota_notify.py /usr/local/bin/quota_notify.py
COPY data/Dockerfiles/dovecot/repl_health.sh /usr/local/bin/repl_health.sh
COPY data/Dockerfiles/dovecot/optimize-fts.sh /usr/local/bin/optimize-fts.sh

RUN chmod +x /docker-entrypoint.sh \
  /usr/local/sbin/stop-supervisor.sh

COPY trim_logs.sh /usr/local/bin/trim_logs.sh
COPY clean_q_aged.sh /usr/local/bin/clean_q_aged.sh
COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
COPY imapsync /usr/local/bin/imapsync
COPY imapsync_runner.pl /usr/local/bin/imapsync_runner.pl
COPY report-spam.sieve /usr/lib/dovecot/sieve/report-spam.sieve
COPY report-ham.sieve /usr/lib/dovecot/sieve/report-ham.sieve
COPY rspamd-pipe-ham /usr/lib/dovecot/sieve/rspamd-pipe-ham
COPY rspamd-pipe-spam /usr/lib/dovecot/sieve/rspamd-pipe-spam
COPY sa-rules.sh /usr/local/bin/sa-rules.sh
COPY maildir_gc.sh /usr/local/bin/maildir_gc.sh
COPY docker-entrypoint.sh /
COPY supervisord.conf /etc/supervisor/supervisord.conf
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
COPY quarantine_notify.py /usr/local/bin/quarantine_notify.py
COPY quota_notify.py /usr/local/bin/quota_notify.py
COPY repl_health.sh /usr/local/bin/repl_health.sh
COPY optimize-fts.sh /usr/local/bin/optimize-fts.sh

ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
@@ -1,15 +1,254 @@
#!/bin/bash
set -e

# Run hooks
for file in /hooks/*; do
  if [ -x "${file}" ]; then
    echo "Running hook ${file}"
    "${file}"
  fi
done

# Wait for MySQL to warm-up
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
  echo "Waiting for database to come up..."
  sleep 2
done

python3 -u /bootstrap/main.py
BOOTSTRAP_EXIT_CODE=$?
until dig +short mailcow.email > /dev/null; do
  echo "Waiting for DNS..."
  sleep 1
done

# Do not attempt to write to slave
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning"
else
  REDIS_CMDLINE="redis-cli -h redis -p 6379 -a ${REDISPASS} --no-auth-warning"
fi

until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
  echo "Waiting for Redis..."
  sleep 2
done

${REDIS_CMDLINE} SET DOVECOT_REPL_HEALTH 1 > /dev/null

# Create missing directories
[[ ! -d /etc/dovecot/sql/ ]] && mkdir -p /etc/dovecot/sql/
[[ ! -d /etc/dovecot/auth/ ]] && mkdir -p /etc/dovecot/auth/
[[ ! -d /etc/dovecot/conf.d/ ]] && mkdir -p /etc/dovecot/conf.d/
[[ ! -d /var/vmail/_garbage ]] && mkdir -p /var/vmail/_garbage
[[ ! -d /var/vmail/sieve ]] && mkdir -p /var/vmail/sieve
[[ ! -d /etc/sogo ]] && mkdir -p /etc/sogo
[[ ! -d /var/volatile ]] && mkdir -p /var/volatile

# Set Dovecot sql config parameters, escape " in db password
DBPASS=$(echo ${DBPASS} | sed 's/"/\\"/g')

# Create quota dict for Dovecot
if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  QUOTA_TABLE=quota2
else
  QUOTA_TABLE=quota2replica
fi
cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-quota.conf
# Autogenerated by mailcow
connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
map {
  pattern = priv/quota/storage
  table = ${QUOTA_TABLE}
  username_field = username
  value_field = bytes
}
map {
  pattern = priv/quota/messages
  table = ${QUOTA_TABLE}
  username_field = username
  value_field = messages
}
EOF

# Create dict used for sieve pre and postfilters
cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-sieve_before.conf
# Autogenerated by mailcow
connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
map {
  pattern = priv/sieve/name/\$script_name
  table = sieve_before
  username_field = username
  value_field = id
  fields {
    script_name = \$script_name
  }
}
map {
  pattern = priv/sieve/data/\$id
  table = sieve_before
  username_field = username
  value_field = script_data
  fields {
    id = \$id
  }
}
EOF

cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-sieve_after.conf
# Autogenerated by mailcow
connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
map {
  pattern = priv/sieve/name/\$script_name
  table = sieve_after
  username_field = username
  value_field = id
  fields {
    script_name = \$script_name
  }
}
map {
  pattern = priv/sieve/data/\$id
  table = sieve_after
  username_field = username
  value_field = script_data
  fields {
    id = \$id
  }
}
EOF

echo -n ${ACL_ANYONE} > /etc/dovecot/acl_anyone

if [[ "${SKIP_FTS}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  echo -e "\e[33mDetecting SKIP_FTS=y... not enabling Flatcurve (FTS) then...\e[0m"
  echo -n 'quota acl zlib mail_crypt mail_crypt_acl mail_log notify listescape replication lazy_expunge' > /etc/dovecot/mail_plugins
  echo -n 'quota imap_quota imap_acl acl zlib imap_zlib imap_sieve mail_crypt mail_crypt_acl notify listescape replication mail_log' > /etc/dovecot/mail_plugins_imap
  echo -n 'quota sieve acl zlib mail_crypt mail_crypt_acl notify listescape replication' > /etc/dovecot/mail_plugins_lmtp
else
  echo -e "\e[32mDetecting SKIP_FTS=n... enabling Flatcurve (FTS)\e[0m"
  echo -n 'quota acl zlib mail_crypt mail_crypt_acl mail_log notify fts fts_flatcurve listescape replication lazy_expunge' > /etc/dovecot/mail_plugins
  echo -n 'quota imap_quota imap_acl acl zlib imap_zlib imap_sieve mail_crypt mail_crypt_acl notify mail_log fts fts_flatcurve listescape replication' > /etc/dovecot/mail_plugins_imap
  echo -n 'quota sieve acl zlib mail_crypt mail_crypt_acl fts fts_flatcurve notify listescape replication' > /etc/dovecot/mail_plugins_lmtp
fi
chmod 644 /etc/dovecot/mail_plugins /etc/dovecot/mail_plugins_imap /etc/dovecot/mail_plugins_lmtp /templates/quarantine.tpl

cat <<EOF > /etc/dovecot/sql/dovecot-dict-sql-userdb.conf
# Autogenerated by mailcow
driver = mysql
connect = "host=/var/run/mysqld/mysqld.sock dbname=${DBNAME} user=${DBUSER} password=${DBPASS}"
user_query = SELECT CONCAT(JSON_UNQUOTE(JSON_VALUE(attributes, '$.mailbox_format')), mailbox_path_prefix, '%d/%n/${MAILDIR_SUB}:VOLATILEDIR=/var/volatile/%u:INDEX=/var/vmail_index/%u') AS mail, '%s' AS protocol, 5000 AS uid, 5000 AS gid, concat('*:bytes=', quota) AS quota_rule FROM mailbox WHERE username = '%u' AND (active = '1' OR active = '2')
iterate_query = SELECT username FROM mailbox WHERE active = '1' OR active = '2';
EOF

# Migrate old sieve_after file
[[ -f /etc/dovecot/sieve_after ]] && mv /etc/dovecot/sieve_after /etc/dovecot/global_sieve_after
# Create global sieve scripts
cat /etc/dovecot/global_sieve_after > /var/vmail/sieve/global_sieve_after.sieve
cat /etc/dovecot/global_sieve_before > /var/vmail/sieve/global_sieve_before.sieve

# Check permissions of vmail/index/garbage directories.
# Do not do this every start-up, it may take a very long time. So we use a stat check here.
if [[ $(stat -c %U /var/vmail/) != "vmail" ]] ; then chown -R vmail:vmail /var/vmail ; fi
if [[ $(stat -c %U /var/vmail/_garbage) != "vmail" ]] ; then chown -R vmail:vmail /var/vmail/_garbage ; fi
if [[ $(stat -c %U /var/vmail_index) != "vmail" ]] ; then chown -R vmail:vmail /var/vmail_index ; fi

# Cleanup random user maildirs
rm -rf /var/vmail/mailcow.local/*
# Cleanup PIDs
[[ -f /tmp/quarantine_notify.pid ]] && rm /tmp/quarantine_notify.pid

# create sni configuration
echo "" > /etc/dovecot/sni.conf
for cert_dir in /etc/ssl/mail/*/ ; do
  if [[ ! -f ${cert_dir}domains ]] || [[ ! -f ${cert_dir}cert.pem ]] || [[ ! -f ${cert_dir}key.pem ]]; then
    continue
  fi
  domains=($(cat ${cert_dir}domains))
  for domain in ${domains[@]}; do
    echo 'local_name '${domain}' {' >> /etc/dovecot/sni.conf;
    echo ' ssl_cert = <'${cert_dir}'cert.pem' >> /etc/dovecot/sni.conf;
    echo ' ssl_key = <'${cert_dir}'key.pem' >> /etc/dovecot/sni.conf;
    echo '}' >> /etc/dovecot/sni.conf;
  done
done

# Create random master for SOGo sieve features
RAND_USER=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 16 | head -n 1)
RAND_PASS=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 24 | head -n 1)

if [[ ! -z ${DOVECOT_MASTER_USER} ]] && [[ ! -z ${DOVECOT_MASTER_PASS} ]]; then
  RAND_USER=${DOVECOT_MASTER_USER}
  RAND_PASS=${DOVECOT_MASTER_PASS}
fi
echo ${RAND_USER}@mailcow.local:{SHA1}$(echo -n ${RAND_PASS} | sha1sum | awk '{print $1}'):::::: > /etc/dovecot/dovecot-master.passwd
echo ${RAND_USER}@mailcow.local::5000:5000:::: > /etc/dovecot/dovecot-master.userdb
echo ${RAND_USER}@mailcow.local:${RAND_PASS} > /etc/sogo/sieve.creds

if [[ -z ${MAILDIR_SUB} ]]; then
  MAILDIR_SUB_SHARED=
else
  MAILDIR_SUB_SHARED=/${MAILDIR_SUB}
fi
cat <<EOF > /etc/dovecot/shared_namespace.conf
# Autogenerated by mailcow
namespace {
  type = shared
  separator = /
  prefix = Shared/%%u/
  location = maildir:%%h${MAILDIR_SUB_SHARED}:INDEX=~${MAILDIR_SUB_SHARED}/Shared/%%u
  subscriptions = no
  list = children
}
EOF

cat <<EOF > /etc/dovecot/sogo_trusted_ip.conf
# Autogenerated by mailcow
remote ${IPV4_NETWORK}.248 {
  disable_plaintext_auth = no
}
EOF

# Create random master Password for SOGo SSO
RAND_PASS=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 32 | head -n 1)
echo -n ${RAND_PASS} > /etc/phpfpm/sogo-sso.pass
cat <<EOF > /etc/dovecot/sogo-sso.conf
# Autogenerated by mailcow
passdb {
  driver = static
  args = allow_nets=${IPV4_NETWORK}.248/32 password={plain}${RAND_PASS}
}
EOF

# Creating additional creds file for SOGo notify crons (calendars, etc) (dummy user, sso password)
echo -n ${RAND_USER}@mailcow.local:${RAND_PASS} > /etc/sogo/cron.creds

if [[ "${MASTER}" =~ ^([nN][oO]|[nN])+$ ]]; then
  # Toggling MASTER will result in a rebuild of containers, so the quota script will be recreated
  cat <<'EOF' > /usr/local/bin/quota_notify.py
#!/usr/bin/python3
import sys
sys.exit()
EOF
fi

# Set mail_replica for HA setups
if [[ -n ${MAILCOW_REPLICA_IP} && -n ${DOVEADM_REPLICA_PORT} ]]; then
  cat <<EOF > /etc/dovecot/mail_replica.conf
# Autogenerated by mailcow
mail_replica = tcp:${MAILCOW_REPLICA_IP}:${DOVEADM_REPLICA_PORT}
EOF
fi

# Setting variables for indexer-worker inside fts.conf automatically according to mailcow.conf settings
if [[ "${SKIP_FTS}" =~ ^([nN][oO]|[nN])+$ ]]; then
  echo -e "\e[94mConfiguring FTS Settings...\e[0m"
  echo -e "\e[94mSetting FTS Memory Limit (per process) to ${FTS_HEAP} MB\e[0m"
  sed -i "s/vsz_limit\s*=\s*[0-9]*\s*MB*/vsz_limit=${FTS_HEAP} MB/" /etc/dovecot/conf.d/fts.conf
  echo -e "\e[94mSetting FTS Process Limit to ${FTS_PROCS}\e[0m"
  sed -i "s/process_limit\s*=\s*[0-9]*/process_limit=${FTS_PROCS}/" /etc/dovecot/conf.d/fts.conf
fi

# 401 is user dovecot
if [[ ! -s /mail_crypt/ecprivkey.pem || ! -s /mail_crypt/ecpubkey.pem ]]; then
  openssl ecparam -name prime256v1 -genkey | openssl pkey -out /mail_crypt/ecprivkey.pem
  openssl pkey -in /mail_crypt/ecprivkey.pem -pubout -out /mail_crypt/ecpubkey.pem
  chown 401 /mail_crypt/ecprivkey.pem /mail_crypt/ecpubkey.pem
else
  chown 401 /mail_crypt/ecprivkey.pem /mail_crypt/ecpubkey.pem
fi

# Fix OpenSSL 3.X TLS1.0, 1.1 support (https://community.mailcow.email/d/4062-hi-all/20)
if grep -qE 'ssl_min_protocol\s*=\s*(TLSv1|TLSv1\.1)\s*$' /etc/dovecot/dovecot.conf /etc/dovecot/extra.conf; then
@@ -22,10 +261,89 @@ if grep -qE 'ssl_min_protocol\s*=\s*(TLSv1|TLSv1\.1)\s*$' /etc/dovecot/dovecot.c
  echo "CipherString = DEFAULT@SECLEVEL=0" >> /etc/ssl/openssl.cnf
fi

if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
  echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting Dovecot."
  exit $BOOTSTRAP_EXIT_CODE
fi

# Compile sieve scripts
sievec /var/vmail/sieve/global_sieve_before.sieve
sievec /var/vmail/sieve/global_sieve_after.sieve
sievec /usr/lib/dovecot/sieve/report-spam.sieve
sievec /usr/lib/dovecot/sieve/report-ham.sieve

# Fix permissions
chown root:root /etc/dovecot/sql/*.conf
chown root:dovecot /etc/dovecot/sql/dovecot-dict-sql-sieve* /etc/dovecot/sql/dovecot-dict-sql-quota* /etc/dovecot/auth/passwd-verify.lua
chmod 640 /etc/dovecot/sql/*.conf /etc/dovecot/auth/passwd-verify.lua
chown -R vmail:vmail /var/vmail/sieve
chown -R vmail:vmail /var/volatile
chown -R vmail:vmail /var/vmail_index
adduser vmail tty
chmod g+rw /dev/console
chown root:tty /dev/console
chmod +x /usr/lib/dovecot/sieve/rspamd-pipe-ham \
  /usr/lib/dovecot/sieve/rspamd-pipe-spam \
  /usr/local/bin/imapsync_runner.pl \
  /usr/local/bin/imapsync \
  /usr/local/bin/trim_logs.sh \
  /usr/local/bin/sa-rules.sh \
  /usr/local/bin/clean_q_aged.sh \
  /usr/local/bin/maildir_gc.sh \
  /usr/local/sbin/stop-supervisor.sh \
  /usr/local/bin/quota_notify.py \
  /usr/local/bin/repl_health.sh \
  /usr/local/bin/optimize-fts.sh

# Prepare environment file for cronjobs
printenv | sed 's/^\(.*\)$/export \1/g' > /source_env.sh

# Clean old PID if any
[[ -f /var/run/dovecot/master.pid ]] && rm /var/run/dovecot/master.pid

# Clean stopped imapsync jobs
rm -f /tmp/imapsync_busy.lock
IMAPSYNC_TABLE=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SHOW TABLES LIKE 'imapsync'" -Bs)
[[ ! -z ${IMAPSYNC_TABLE} ]] && mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "UPDATE imapsync SET is_running='0'"

# Envsubst maildir_gc
echo "$(envsubst < /usr/local/bin/maildir_gc.sh)" > /usr/local/bin/maildir_gc.sh

# GUID generation
while [[ ${VERSIONS_OK} != 'OK' ]]; do
  if [[ ! -z $(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -B -e "SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = \"${DBNAME}\" AND TABLE_NAME = 'versions'") ]]; then
    VERSIONS_OK=OK
  else
    echo "Waiting for versions table to be created..."
    sleep 3
  fi
done
PUBKEY_MCRYPT=$(doveconf -P 2> /dev/null | grep -i mail_crypt_global_public_key | cut -d '<' -f2)
if [ -f ${PUBKEY_MCRYPT} ]; then
  GUID=$(cat <(echo ${MAILCOW_HOSTNAME}) /mail_crypt/ecpubkey.pem | sha256sum | cut -d ' ' -f1 | tr -cd "[a-fA-F0-9.:/] ")
  if [ ${#GUID} -eq 64 ]; then
    mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
REPLACE INTO versions (application, version) VALUES ("GUID", "${GUID}");
EOF
  else
    mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
REPLACE INTO versions (application, version) VALUES ("GUID", "INVALID");
EOF
  fi
fi

echo "Bootstrap succeeded. Starting Dovecot..."
/usr/sbin/dovecot -F
# Collect SA rules once now
/usr/local/bin/sa-rules.sh

# Run hooks
for file in /hooks/*; do
  if [ -x "${file}" ]; then
    echo "Running hook ${file}"
    "${file}"
  fi
done

# For some strange, unknown and stupid reason, Dovecot may run into a race condition, when this file is not touched before it is read by dovecot/auth
# May be related to something inside Docker, I seriously don't know
touch /etc/dovecot/auth/passwd-verify.lua

if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
fi

exec "$@"
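Editor's note: the GUID written to the `versions` table above is simply the SHA-256 of the hostname (newline-terminated, as `echo` emits it) concatenated with the mail_crypt public key. A minimal sketch of the equivalent computation in Python; the hostname in the example is illustrative:

```python
import hashlib

def mailcow_guid(hostname: str, pubkey_path: str = "/mail_crypt/ecpubkey.pem") -> str:
    """Mirror `cat <(echo $MAILCOW_HOSTNAME) /mail_crypt/ecpubkey.pem | sha256sum`."""
    with open(pubkey_path, "rb") as f:
        pubkey = f.read()
    # echo appends a trailing newline to the hostname before the key bytes follow
    return hashlib.sha256(hostname.encode() + b"\n" + pubkey).hexdigest()

# Hypothetical usage:
# print(mailcow_guid("mail.example.org"))  # 64 hex chars, matching the length check above
```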
@@ -132,8 +132,8 @@ while ($row = $sth->fetchrow_arrayref()) {
  "--tmpdir", "/tmp",
  "--nofoldersizes",
  "--addheader",
  ($timeout1 gt "0" ? () : ('--timeout1', $timeout1)),
  ($timeout2 gt "0" ? () : ('--timeout2', $timeout2)),
  ($timeout1 le "0" ? () : ('--timeout1', $timeout1)),
  ($timeout2 le "0" ? () : ('--timeout2', $timeout2)),
  ($exclude eq "" ? () : ("--exclude", $exclude)),
  ($subfolder2 eq "" ? () : ('--subfolder2', $subfolder2)),
  ($maxage eq "0" ? () : ('--maxage', $maxage)),
data/Dockerfiles/dovecot/maildir_gc.sh (new executable file, 2 lines)
@@ -0,0 +1,2 @@
#!/bin/bash
[ -d /var/vmail/_garbage/ ] && /usr/bin/find /var/vmail/_garbage/ -mindepth 1 -maxdepth 1 -type d -cmin +${MAILDIR_GC_TIME} -exec rm -r {} \;
@@ -8,7 +8,8 @@ from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
import jinja2
from jinja2 import Template
from jinja2 import TemplateError
from jinja2.sandbox import SandboxedEnvironment
import json
import redis
import time
@@ -75,22 +76,27 @@ try:

def notify_rcpt(rcpt, msg_count, quarantine_acl, category):
  if category == "add_header": category = "add header"
  meta_query = query_mysql('SELECT SHA2(CONCAT(id, qid), 256) AS qhash, id, subject, score, sender, created, action FROM quarantine WHERE notified = 0 AND rcpt = "%s" AND score < %f AND (action = "%s" OR "all" = "%s")' % (rcpt, max_score, category, category))
  meta_query = query_mysql('SELECT `qhash`, id, subject, score, sender, created, action FROM quarantine WHERE notified = 0 AND rcpt = "%s" AND score < %f AND (action = "%s" OR "all" = "%s")' % (rcpt, max_score, category, category))
  print("%s: %d of %d messages qualify for notification" % (rcpt, len(meta_query), msg_count))
  if len(meta_query) == 0:
    return
  msg_count = len(meta_query)
  env = SandboxedEnvironment()
  if r.get('Q_HTML'):
    try:
      template = Template(r.get('Q_HTML'))
    except:
      print("Error: Cannot parse quarantine template, falling back to default template.")
      with open('/templates/quarantine.tpl') as file_:
        template = Template(file_.read())
    try:
      template = env.from_string(r.get('Q_HTML'))
    except Exception:
      print("Error: Cannot parse quarantine template, falling back to default template.")
      with open('/templates/quarantine.tpl') as file_:
        template = env.from_string(file_.read())
  else:
    with open('/templates/quarantine.tpl') as file_:
      template = Template(file_.read())
  html = template.render(meta=meta_query, username=rcpt, counter=msg_count, hostname=mailcow_hostname, quarantine_acl=quarantine_acl)
    with open('/templates/quarantine.tpl') as file_:
      template = env.from_string(file_.read())
  try:
    html = template.render(meta=meta_query, username=rcpt, counter=msg_count, hostname=mailcow_hostname, quarantine_acl=quarantine_acl)
  except (jinja2.exceptions.SecurityError, TemplateError) as ex:
    print(f"SecurityError or TemplateError in template rendering: {ex}")
    return
  text = html2text.html2text(html)
  count = 0
  while count < 15:
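Editor's note: the switch from a bare `Template` to `SandboxedEnvironment` is what lets the render call fail safely on hostile template content pulled from Redis. A minimal sketch of the difference, with an invented payload for illustration:

```python
from jinja2 import Environment
from jinja2.sandbox import SandboxedEnvironment
from jinja2.exceptions import SecurityError

# A template that tries to walk Python internals via attribute access
payload = "{{ ''.__class__.__mro__ }}"

# A plain Environment happily renders the introspection result
print(Environment().from_string(payload).render())

# The sandbox refuses unsafe attribute lookups at render time
try:
    SandboxedEnvironment().from_string(payload).render()
except SecurityError as ex:
    print("blocked:", ex)
```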
@@ -6,7 +6,7 @@ from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
import jinja2
from jinja2 import Template
from jinja2.sandbox import SandboxedEnvironment
import redis
import time
import json
@@ -14,11 +14,6 @@ import sys
import html2text
from subprocess import Popen, PIPE, STDOUT

# Don't run if role is not master
if os.getenv("MASTER").lower() in ["n", "no"]:
  sys.exit()

if len(sys.argv) > 2:
  percent = int(sys.argv[1])
  username = str(sys.argv[2])
@@ -38,16 +33,24 @@ while True:

if r.get('QW_HTML'):
  try:
    template = Template(r.get('QW_HTML'))
  except:
    print("Error: Cannot parse quarantine template, falling back to default template.")
    env = SandboxedEnvironment()
    template = env.from_string(r.get('QW_HTML'))
  except Exception:
    print("Error: Cannot parse quota template, falling back to default template.")
    with open('/templates/quota.tpl') as file_:
      template = Template(file_.read())
      env = SandboxedEnvironment()
      template = env.from_string(file_.read())
else:
  with open('/templates/quota.tpl') as file_:
    template = Template(file_.read())
    env = SandboxedEnvironment()
    template = env.from_string(file_.read())

try:
  html = template.render(username=username, percent=percent)
except (jinja2.exceptions.SecurityError, jinja2.TemplateError) as ex:
  print(f"SecurityError or TemplateError in template rendering: {ex}")
  sys.exit(1)

html = template.render(username=username, percent=percent)
text = html2text.html2text(html)

try:
@@ -25,11 +25,11 @@ sed -i -e 's/\([^\\]\)\$\([^\/]\)/\1\\$\2/g' /etc/rspamd/custom/sa-rules

if [[ "$(cat /etc/rspamd/custom/sa-rules | md5sum | cut -d' ' -f1)" != "${HASH_SA_RULES}" ]]; then
  CONTAINER_NAME=rspamd-mailcow
  CONTAINER_ID=$(curl --silent --insecure https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | \
  CONTAINER_ID=$(curl --silent --insecure https://controller.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | \
    jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" | \
    jq -rc "select( .name | tostring | contains(\"${CONTAINER_NAME}\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id")
  if [[ ! -z ${CONTAINER_ID} ]]; then
    curl --silent --insecure -XPOST --connect-timeout 15 --max-time 120 https://dockerapi.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/restart
    curl --silent --insecure -XPOST --connect-timeout 15 --max-time 120 https://controller.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/restart
  fi
fi
@@ -11,8 +11,8 @@ stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autostart=true

[program:bootstrap]
command=/docker-entrypoint.sh
[program:dovecot]
command=/usr/sbin/dovecot -F
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
@@ -1,28 +0,0 @@
FROM mariadb:10.11

LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"

RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    python3 \
    python3-pip \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

RUN pip install \
    mysql-connector-python \
    jinja2 \
    redis \
    dnspython \
    psutil

COPY data/Dockerfiles/bootstrap /bootstrap
COPY data/Dockerfiles/mariadb/docker-entrypoint.sh /docker-entrypoint.sh

RUN chmod +x /docker-entrypoint.sh

ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["mysqld"]
@@ -1,20 +0,0 @@
#!/bin/bash

# Run hooks
for file in /hooks/*; do
  if [ -x "${file}" ]; then
    echo "Running hook ${file}"
    "${file}"
  fi
done

python3 -u /bootstrap/main.py
BOOTSTRAP_EXIT_CODE=$?

if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
  echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting MariaDB."
  exit $BOOTSTRAP_EXIT_CODE
fi

echo "Bootstrap succeeded. Starting MariaDB..."
exec /usr/local/bin/docker-entrypoint.sh "$@"
@@ -1,6 +1,6 @@
#!/bin/sh

backend=iptables
backend=nftables

nft list table ip filter &>/dev/null
nftables_found=$?
@@ -1,5 +1,7 @@
#!/usr/bin/env python3

DEBUG = False

import re
import os
import sys
@@ -20,10 +22,13 @@ from modules.Logger import Logger
from modules.IPTables import IPTables
from modules.NFTables import NFTables

def logdebug(msg):
  if DEBUG:
    logger.logInfo("DEBUG: %s" % msg)

# globals
# Globals
WHITELIST = []
BLACKLIST= []
BLACKLIST = []
bans = {}
quit_now = False
exit_code = 0
@@ -33,12 +38,10 @@ r = None
pubsub = None
clear_before_quit = False

def refreshF2boptions():
  global f2boptions
  global quit_now
  global exit_code

  f2boptions = {}

  if not r.get('F2B_OPTIONS'):
@@ -52,8 +55,9 @@ def refreshF2boptions():
  else:
    try:
      f2boptions = json.loads(r.get('F2B_OPTIONS'))
    except ValueError:
      logger.logCrit('Error loading F2B options: F2B_OPTIONS is not json')
    except ValueError as e:
      logger.logCrit(
        'Error loading F2B options: F2B_OPTIONS is not json. Exception: %s' % e)
      quit_now = True
      exit_code = 2

@@ -61,15 +65,15 @@ def refreshF2boptions():
  r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False))

def verifyF2boptions(f2boptions):
  verifyF2boption(f2boptions,'ban_time', 1800)
  verifyF2boption(f2boptions,'max_ban_time', 10000)
  verifyF2boption(f2boptions,'ban_time_increment', True)
  verifyF2boption(f2boptions,'max_attempts', 10)
  verifyF2boption(f2boptions,'retry_window', 600)
  verifyF2boption(f2boptions,'netban_ipv4', 32)
  verifyF2boption(f2boptions,'netban_ipv6', 128)
  verifyF2boption(f2boptions,'banlist_id', str(uuid.uuid4()))
  verifyF2boption(f2boptions,'manage_external', 0)
  verifyF2boption(f2boptions, 'ban_time', 1800)
  verifyF2boption(f2boptions, 'max_ban_time', 10000)
  verifyF2boption(f2boptions, 'ban_time_increment', True)
  verifyF2boption(f2boptions, 'max_attempts', 10)
  verifyF2boption(f2boptions, 'retry_window', 600)
  verifyF2boption(f2boptions, 'netban_ipv4', 32)
  verifyF2boption(f2boptions, 'netban_ipv6', 128)
  verifyF2boption(f2boptions, 'banlist_id', str(uuid.uuid4()))
  verifyF2boption(f2boptions, 'manage_external', 0)

def verifyF2boption(f2boptions, f2boption, f2bdefault):
  f2boptions[f2boption] = f2boptions[f2boption] if f2boption in f2boptions and f2boptions[f2boption] is not None else f2bdefault
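Editor's note: the defaults above imply an F2B_OPTIONS Redis value shaped roughly like the following. This is a sketch assuming nothing but the defaults shown; `banlist_id` is a random UUID in practice:

```python
import json
import uuid

# Shape of the F2B_OPTIONS JSON stored in Redis, using the defaults above
f2b_defaults = {
    "ban_time": 1800,            # initial ban duration in seconds
    "max_ban_time": 10000,       # cap when ban_time_increment grows the ban
    "ban_time_increment": True,  # repeat offenders get longer bans
    "max_attempts": 10,          # failures inside retry_window before a ban
    "retry_window": 600,         # sliding window in seconds
    "netban_ipv4": 32,           # CIDR size applied to banned IPv4 offenders
    "netban_ipv6": 128,          # CIDR size applied to banned IPv6 offenders
    "banlist_id": str(uuid.uuid4()),
    "manage_external": 0,        # 1 = only track bans, let an external firewall act
}
print(json.dumps(f2b_defaults, indent=2))
```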
@@ -111,7 +115,7 @@ def get_ip(address):
def ban(address):
  global f2boptions
  global lock

  logdebug("ban() called with address=%s" % address)
  refreshF2boptions()
  MAX_ATTEMPTS = int(f2boptions['max_attempts'])
  RETRY_WINDOW = int(f2boptions['retry_window'])
@@ -119,31 +123,43 @@ def ban(address):
  NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6'])

  ip = get_ip(address)
  if not ip: return
  if not ip:
    logdebug("No valid IP -- skipping ban()")
    return
  address = str(ip)
  self_network = ipaddress.ip_network(address)

  with lock:
    temp_whitelist = set(WHITELIST)
    if temp_whitelist:
      for wl_key in temp_whitelist:
        wl_net = ipaddress.ip_network(wl_key, False)
        if wl_net.overlaps(self_network):
          logger.logInfo('Address %s is whitelisted by rule %s' % (self_network, wl_net))
          return
    logdebug("Checking if %s overlaps with any WHITELIST entries" % self_network)
    if temp_whitelist:
      for wl_key in temp_whitelist:
        wl_net = ipaddress.ip_network(wl_key, False)
        logdebug("Checking overlap between %s and %s" % (self_network, wl_net))
        if wl_net.overlaps(self_network):
          logger.logInfo(
            'Address %s is allowlisted by rule %s' % (self_network, wl_net))
          return

  net = ipaddress.ip_network((address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False)
  net = ipaddress.ip_network(
    (address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False)
  net = str(net)
  logdebug("Ban net: %s" % net)

  if not net in bans:
    bans[net] = {'attempts': 0, 'last_attempt': 0, 'ban_counter': 0}
    logdebug("Initing new ban counter for %s" % net)

  current_attempt = time.time()
  logdebug("Current attempt ts=%s, previous: %s, retry_window: %s" %
    (current_attempt, bans[net]['last_attempt'], RETRY_WINDOW))
  if current_attempt - bans[net]['last_attempt'] > RETRY_WINDOW:
    bans[net]['attempts'] = 0
    logdebug("Ban counter for %s reset as window expired" % net)

  bans[net]['attempts'] += 1
  bans[net]['last_attempt'] = current_attempt
  logdebug("%s attempts now %d" % (net, bans[net]['attempts']))

  if bans[net]['attempts'] >= MAX_ATTEMPTS:
    cur_time = int(round(time.time()))
@@ -151,34 +167,41 @@ def ban(address):
    logger.logCrit('Banning %s for %d minutes' % (net, NET_BAN_TIME / 60 ))
    if type(ip) is ipaddress.IPv4Address and int(f2boptions['manage_external']) != 1:
      with lock:
        logdebug("Calling tables.banIPv4(%s)" % net)
        tables.banIPv4(net)
    elif int(f2boptions['manage_external']) != 1:
      with lock:
        logdebug("Calling tables.banIPv6(%s)" % net)
        tables.banIPv6(net)

    logdebug("Updating F2B_ACTIVE_BANS[%s]=%d" %
      (net, cur_time + NET_BAN_TIME))
    r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + NET_BAN_TIME)
  else:
    logger.logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net))
    logger.logWarn('%d more attempts in the next %d seconds until %s is banned' % (
      MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net))

def unban(net):
  global lock

  logdebug("Calling unban() with net=%s" % net)
  if not net in bans:
    logger.logInfo('%s is not banned, skipping unban and deleting from queue (if any)' % net)
    r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
    return

    logger.logInfo(
      '%s is not banned, skipping unban and deleting from queue (if any)' % net)
    r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
    return
  logger.logInfo('Unbanning %s' % net)
  if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network:
    with lock:
      logdebug("Calling tables.unbanIPv4(%s)" % net)
      tables.unbanIPv4(net)
  else:
    with lock:
      logdebug("Calling tables.unbanIPv6(%s)" % net)
      tables.unbanIPv6(net)

  r.hdel('F2B_ACTIVE_BANS', '%s' % net)
  r.hdel('F2B_QUEUE_UNBAN', '%s' % net)
  if net in bans:
    logdebug("Unban for %s, setting attempts=0, ban_counter+=1" % net)
    bans[net]['attempts'] = 0
    bans[net]['ban_counter'] += 1
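Editor's note: the attempt bookkeeping in ban() is a simple sliding window: failures accumulate per banned net, the counter resets whenever the gap since the last failure exceeds retry_window, and a ban fires at max_attempts. A standalone sketch of just that counting logic, with the default values and no firewall side effects:

```python
import time

MAX_ATTEMPTS = 10
RETRY_WINDOW = 600  # seconds

bans = {}

def register_failure(net: str, now: float = None) -> bool:
    """Return True when this net crosses the ban threshold."""
    now = time.time() if now is None else now
    entry = bans.setdefault(net, {'attempts': 0, 'last_attempt': 0})
    if now - entry['last_attempt'] > RETRY_WINDOW:
        entry['attempts'] = 0  # window expired, start counting again
    entry['attempts'] += 1
    entry['last_attempt'] = now
    return entry['attempts'] >= MAX_ATTEMPTS

# Ten quick failures trip the threshold; a long pause resets the counter
t = 0
for _ in range(10):
    banned = register_failure("203.0.113.7/32", now=t)
    t += 1
print(banned)                                             # True
print(register_failure("203.0.113.7/32", now=t + 3600))   # False, window expired
```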
@@ -204,17 +227,19 @@ def permBan(net, unban=False):

  if is_unbanned:
    r.hdel('F2B_PERM_BANS', '%s' % net)
    logger.logCrit('Removed host/network %s from blacklist' % net)
    logger.logCrit('Removed host/network %s from denylist' % net)
  elif is_banned:
    r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time())))
    logger.logCrit('Added host/network %s to blacklist' % net)
    logger.logCrit('Added host/network %s to denylist' % net)

def clear():
  global lock
  logger.logInfo('Clearing all bans')
  for net in bans.copy():
    logdebug("Unbanning net: %s" % net)
    unban(net)
  with lock:
    logdebug("Clearing IPv4/IPv6 table")
    tables.clearIPv4Table()
    tables.clearIPv6Table()
    try:
@@ -275,21 +300,35 @@ def snat6(snat_target):

def autopurge():
  global f2boptions

  logdebug("autopurge thread started")
  while not quit_now:
    logdebug("autopurge tick")
    time.sleep(10)
    refreshF2boptions()
    MAX_ATTEMPTS = int(f2boptions['max_attempts'])
    QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN')
    logdebug("QUEUE_UNBAN: %s" % QUEUE_UNBAN)
    if QUEUE_UNBAN:
      for net in QUEUE_UNBAN:
        logdebug("Autopurge: unbanning queued net: %s" % net)
        unban(str(net))
    for net in bans.copy():
      if bans[net]['attempts'] >= MAX_ATTEMPTS:
        NET_BAN_TIME = calcNetBanTime(bans[net]['ban_counter'])
        TIME_SINCE_LAST_ATTEMPT = time.time() - bans[net]['last_attempt']
        if TIME_SINCE_LAST_ATTEMPT > NET_BAN_TIME:
          unban(net)
    # Only check expiry for actively banned IPs:
    active_bans = r.hgetall('F2B_ACTIVE_BANS')
    now = time.time()
    for net_str, expire_str in active_bans.items():
      logdebug("Checking ban expiry for (actively banned): %s" % net_str)
      # Defensive: always process if timer missing or expired
      try:
        expire = float(expire_str)
      except Exception:
        logdebug("Invalid expire time for %s; unbanning" % net_str)
        unban(net_str)
        continue
      time_left = expire - now
      logdebug("Time left for %s: %.1f seconds" % (net_str, time_left))
      if time_left <= 0:
        logdebug("Ban expired for %s" % net_str)
        unban(net_str)

def mailcowChainOrder():
  global lock
@@ -359,7 +398,7 @@ def whitelistUpdate():
  with lock:
    if Counter(new_whitelist) != Counter(WHITELIST):
      WHITELIST = new_whitelist
      logger.logInfo('Whitelist was changed, it has %s entries' % len(WHITELIST))
      logger.logInfo('Allowlist was changed, it has %s entries' % len(WHITELIST))
  time.sleep(60.0 - ((time.time() - start_time) % 60.0))

def blacklistUpdate():
@@ -375,7 +414,7 @@ def blacklistUpdate():
      addban = set(new_blacklist).difference(BLACKLIST)
      delban = set(BLACKLIST).difference(new_blacklist)
      BLACKLIST = new_blacklist
      logger.logInfo('Blacklist was changed, it has %s entries' % len(BLACKLIST))
      logger.logInfo('Denylist was changed, it has %s entries' % len(BLACKLIST))
      if addban:
        for net in addban:
          permBan(net=net)
@@ -386,42 +425,43 @@

def sigterm_quit(signum, frame):
  global clear_before_quit
  logdebug("SIGTERM received, setting clear_before_quit to True and exiting")
  clear_before_quit = True
  sys.exit(exit_code)

def berfore_quit():
def before_quit():
  logdebug("before_quit called, clear_before_quit=%s" % clear_before_quit)
  if clear_before_quit:
    clear()
  if pubsub is not None:
    pubsub.unsubscribe()

if __name__ == '__main__':
  atexit.register(berfore_quit)
  logger = Logger()
  logdebug("Sys.argv: %s" % sys.argv)
  atexit.register(before_quit)
  signal.signal(signal.SIGTERM, sigterm_quit)

  # init Logger
  logger = Logger()

  # init backend
  backend = sys.argv[1]
  logdebug("Backend: %s" % backend)
  if backend == "nftables":
    logger.logInfo('Using NFTables backend')
    tables = NFTables(chain_name, logger)
  else:
    logger.logInfo('Using IPTables backend')
    logger.logWarn(
      "DEPRECATION: iptables-legacy is deprecated and will be removed in future releases. "
      "Please switch to nftables on your host to ensure complete compatibility."
    )
    time.sleep(5)
    tables = IPTables(chain_name, logger)

  # In case a previous session was killed without cleanup
  clear()

  # Reinit MAILCOW chain
  # Is called before threads start, no locking
  logger.logInfo("Initializing mailcow netfilter chain")
  tables.initChainIPv4()
  tables.initChainIPv6()

  if os.getenv("DISABLE_NETFILTER_ISOLATION_RULE").lower() in ("y", "yes"):
  if os.getenv("DISABLE_NETFILTER_ISOLATION_RULE", "").lower() in ("y", "yes"):
    logger.logInfo(f"Skipping {chain_name} isolation")
  else:
    logger.logInfo(f"Setting {chain_name} isolation")
@@ -432,23 +472,28 @@ if __name__ == '__main__':
  try:
    redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '')
    redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '')
    logdebug(
      "Connecting redis (SLAVEOF_IP:%s, PORT:%s)" % (redis_slaveof_ip, redis_slaveof_port))
    if "".__eq__(redis_slaveof_ip):
      r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0, password=os.environ['REDISPASS'])
      r = redis.StrictRedis(
        host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0, password=os.environ['REDISPASS'])
    else:
      r = redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0, password=os.environ['REDISPASS'])
      r = redis.StrictRedis(
        host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0, password=os.environ['REDISPASS'])
    r.ping()
    pubsub = r.pubsub()
  except Exception as ex:
    print('%s - trying again in 3 seconds' % (ex))
    logdebug(
      'Redis connection failed: %s - trying again in 3 seconds' % (ex))
    time.sleep(3)
  else:
    break
  logger.set_redis(r)
  logdebug("Redis connection established, setting up F2B keys")

  # rename fail2ban to netfilter
  if r.exists('F2B_LOG'):
    logdebug("Renaming F2B_LOG to NETFILTER_LOG")
    r.rename('F2B_LOG', 'NETFILTER_LOG')
  # clear bans in redis
  r.delete('F2B_ACTIVE_BANS')
  r.delete('F2B_PERM_BANS')

@@ -463,7 +508,7 @@ if __name__ == '__main__':
    snat_ip = os.getenv('SNAT_TO_SOURCE')
    snat_ipo = ipaddress.ip_address(snat_ip)
    if type(snat_ipo) is ipaddress.IPv4Address:
      snat4_thread = Thread(target=snat4,args=(snat_ip,))
      snat4_thread = Thread(target=snat4, args=(snat_ip,))
      snat4_thread.daemon = True
      snat4_thread.start()
  except ValueError:
@@ -499,4 +544,5 @@ if __name__ == '__main__':
  while not quit_now:
    time.sleep(0.5)

  sys.exit(exit_code)
  logdebug("Exiting with code %s" % exit_code)
  sys.exit(exit_code)
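Editor's note: the shutdown path here relies on a standard CPython-on-Unix pattern: the SIGTERM handler calls sys.exit(), which raises SystemExit in the main thread, and the registered atexit hook then performs the actual cleanup. A minimal standalone sketch of that interplay:

```python
import atexit
import os
import signal
import sys
import time

def cleanup():
    # Runs at interpreter exit, including exits triggered by the signal handler
    print("cleanup ran")

def on_sigterm(signum, frame):
    # sys.exit() raises SystemExit in the main thread; atexit hooks still fire
    sys.exit(0)

atexit.register(cleanup)
signal.signal(signal.SIGTERM, on_sigterm)

os.kill(os.getpid(), signal.SIGTERM)  # simulate `docker stop` (Unix only)
time.sleep(5)  # never reached; SystemExit unwinds first
```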
@@ -1,5 +1,6 @@
import time
import json
import datetime

class Logger:
  def __init__(self):
@@ -8,17 +9,28 @@ class Logger:
  def set_redis(self, redis):
    self.r = redis

  def _format_timestamp(self):
    # Local time, second resolution
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

  def log(self, priority, message):
    tolog = {}
    tolog['time'] = int(round(time.time()))
    tolog['priority'] = priority
    tolog['message'] = message
    print(message)
    # build redis-friendly dict
    tolog = {
      'time': int(round(time.time())),  # keep raw timestamp for Redis
      'priority': priority,
      'message': message
    }

    # print human-readable message with timestamp
    ts = self._format_timestamp()
    print(f"{ts} {priority.upper()}: {message}", flush=True)

    # also push JSON to Redis if connected
    if self.r is not None:
      try:
        self.r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False))
      except Exception as ex:
        print('Failed logging to redis: %s' % (ex))
        print(f'{ts} WARN: Failed logging to redis: {ex}', flush=True)

  def logWarn(self, message):
    self.log('warn', message)
@@ -27,4 +39,4 @@ class Logger:
    self.log('crit', message)

  def logInfo(self, message):
    self.log('info', message)
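Editor's note: after this rework, Logger prints a stamped, human-readable line on stdout and mirrors a JSON record to Redis once connected. A short usage sketch (assuming, as the diff suggests, that __init__ leaves self.r unset until set_redis is called; the timestamp shown is illustrative):

```python
from modules.Logger import Logger

logger = Logger()
logger.logInfo("Using NFTables backend")
# stdout: 2024-01-01 12:00:00 INFO: Using NFTables backend

# Once Redis is connected, the same call also pushes JSON to NETFILTER_LOG:
# logger.set_redis(r)
# r.lrange('NETFILTER_LOG', 0, 0)
# -> ['{"time": 1704110400, "priority": "info", "message": "..."}']
```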
@@ -4,33 +4,15 @@ LABEL maintainer "The Infrastructure Company GmbH <info@servercow.de>"
ENV PIP_BREAK_SYSTEM_PACKAGES=1

RUN apk add --no-cache nginx \
    python3 py3-pip \
    supervisor

RUN apk add --no-cache --virtual .build-deps \
    gcc \
    musl-dev \
    python3-dev \
    linux-headers \
    && pip install --break-system-packages psutil \
    && apk del .build-deps

RUN pip install --break-system-packages \
    mysql-connector-python \
    jinja2 \
    redis \
    dnspython
    python3 \
    py3-pip && \
    pip install --upgrade pip && \
    pip install Jinja2

RUN mkdir -p /etc/nginx/includes

COPY ./bootstrap.py /
COPY ./docker-entrypoint.sh /

COPY data/Dockerfiles/bootstrap /bootstrap
COPY data/Dockerfiles/nginx/docker-entrypoint.sh /
COPY data/Dockerfiles/nginx/supervisord.conf /etc/supervisor/supervisord.conf
COPY data/Dockerfiles/nginx/stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh

RUN chmod +x /docker-entrypoint.sh
RUN chmod +x /usr/local/sbin/stop-supervisor.sh

CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["nginx", "-g", "daemon off;"]
data/Dockerfiles/nginx/bootstrap.py (new file, 100 lines)
@@ -0,0 +1,100 @@
import os
import subprocess
from jinja2 import Environment, FileSystemLoader

def includes_conf(env, template_vars):
    server_name = "server_name.active"
    listen_plain = "listen_plain.active"
    listen_ssl = "listen_ssl.active"

    server_name_config = f"server_name {template_vars['MAILCOW_HOSTNAME']} autodiscover.* autoconfig.* {' '.join(template_vars['ADDITIONAL_SERVER_NAMES'])};"
    listen_plain_config = f"listen {template_vars['HTTP_PORT']};"
    listen_ssl_config = f"listen {template_vars['HTTPS_PORT']};"
    if template_vars['ENABLE_IPV6']:
        listen_plain_config += f"\nlisten [::]:{template_vars['HTTP_PORT']};"
        listen_ssl_config += f"\nlisten [::]:{template_vars['HTTPS_PORT']} ssl;"
    listen_ssl_config += "\nhttp2 on;"

    with open(f"/etc/nginx/conf.d/{server_name}", "w") as f:
        f.write(server_name_config)

    with open(f"/etc/nginx/conf.d/{listen_plain}", "w") as f:
        f.write(listen_plain_config)

    with open(f"/etc/nginx/conf.d/{listen_ssl}", "w") as f:
        f.write(listen_ssl_config)

def sites_default_conf(env, template_vars):
    config_name = "sites-default.conf"
    template = env.get_template(f"{config_name}.j2")
    config = template.render(template_vars)

    with open(f"/etc/nginx/includes/{config_name}", "w") as f:
        f.write(config)

def nginx_conf(env, template_vars):
    config_name = "nginx.conf"
    template = env.get_template(f"{config_name}.j2")
    config = template.render(template_vars)

    with open(f"/etc/nginx/{config_name}", "w") as f:
        f.write(config)

def prepare_template_vars():
    ipv4_network = os.getenv("IPV4_NETWORK", "172.22.1")
    additional_server_names = os.getenv("ADDITIONAL_SERVER_NAMES", "")
    trusted_proxies = os.getenv("TRUSTED_PROXIES", "")

    template_vars = {
        'IPV4_NETWORK': ipv4_network,
        'TRUSTED_PROXIES': [item.strip() for item in trusted_proxies.split(",") if item.strip()],
        'SKIP_RSPAMD': os.getenv("SKIP_RSPAMD", "n").lower() in ("y", "yes"),
        'SKIP_SOGO': os.getenv("SKIP_SOGO", "n").lower() in ("y", "yes"),
        'NGINX_USE_PROXY_PROTOCOL': os.getenv("NGINX_USE_PROXY_PROTOCOL", "n").lower() in ("y", "yes"),
        'MAILCOW_HOSTNAME': os.getenv("MAILCOW_HOSTNAME", ""),
        'ADDITIONAL_SERVER_NAMES': [item.strip() for item in additional_server_names.split(",") if item.strip()],
        'HTTP_PORT': os.getenv("HTTP_PORT", "80"),
        'HTTPS_PORT': os.getenv("HTTPS_PORT", "443"),
        'SOGOHOST': os.getenv("SOGOHOST", ipv4_network + ".248"),
        'RSPAMDHOST': os.getenv("RSPAMDHOST", "rspamd-mailcow"),
        'PHPFPMHOST': os.getenv("PHPFPMHOST", "php-fpm-mailcow"),
        'ENABLE_IPV6': os.getenv("ENABLE_IPV6", "true").lower() != "false",
        'HTTP_REDIRECT': os.getenv("HTTP_REDIRECT", "n").lower() in ("y", "yes"),
    }

    ssl_dir = '/etc/ssl/mail/'
    template_vars['valid_cert_dirs'] = []
    for d in os.listdir(ssl_dir):
        full_path = os.path.join(ssl_dir, d)
        if not os.path.isdir(full_path):
            continue

        cert_path = os.path.join(full_path, 'cert.pem')
        key_path = os.path.join(full_path, 'key.pem')
        domains_path = os.path.join(full_path, 'domains')

        if os.path.isfile(cert_path) and os.path.isfile(key_path) and os.path.isfile(domains_path):
            with open(domains_path, 'r') as file:
                domains = file.read().strip()
            domains_list = domains.split()
            if domains_list and template_vars["MAILCOW_HOSTNAME"] not in domains_list:
                template_vars['valid_cert_dirs'].append({
                    'cert_path': full_path + '/',
                    'domains': domains
                })

    return template_vars

def main():
    env = Environment(loader=FileSystemLoader('./etc/nginx/conf.d/templates'))

    # Render config
    print("Render config")
    template_vars = prepare_template_vars()
    sites_default_conf(env, template_vars)
    nginx_conf(env, template_vars)
    includes_conf(env, template_vars)

if __name__ == "__main__":
    main()
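Editor's note: the .j2 templates consumed by get_template() live under /etc/nginx/conf.d/templates and are not part of this diff. A hypothetical fragment illustrates how a template might consume the variables prepared above; the template text is invented for illustration only:

```python
from jinja2 import Environment

# Hypothetical excerpt of a sites-default.conf.j2-style template
fragment = """
listen {{ HTTP_PORT }};
{% if HTTP_REDIRECT %}return 301 https://$host$request_uri;{% endif %}
{% for proxy in TRUSTED_PROXIES %}set_real_ip_from {{ proxy }};
{% endfor %}
"""

values = {
    "HTTP_PORT": "80",
    "HTTP_REDIRECT": True,
    "TRUSTED_PROXIES": ["10.0.0.0/8"],
}
print(Environment().from_string(fragment).render(values))
```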
@@ -1,20 +1,26 @@
#!/bin/sh

# Run hooks
for file in /hooks/*; do
  if [ -x "${file}" ]; then
    echo "Running hook ${file}"
    "${file}"
  fi
PHPFPMHOST=${PHPFPMHOST:-"php-fpm-mailcow"}
SOGOHOST=${SOGOHOST:-"$IPV4_NETWORK.248"}
RSPAMDHOST=${RSPAMDHOST:-"rspamd-mailcow"}

until ping ${PHPFPMHOST} -c1 > /dev/null; do
  echo "Waiting for PHP..."
  sleep 1
done

python3 -u /bootstrap/main.py
BOOTSTRAP_EXIT_CODE=$?

if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
  echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting Nginx."
  exit $BOOTSTRAP_EXIT_CODE
if ! printf "%s\n" "${SKIP_SOGO}" | grep -E '^([yY][eE][sS]|[yY])+$' >/dev/null; then
  until ping ${SOGOHOST} -c1 > /dev/null; do
    echo "Waiting for SOGo..."
    sleep 1
  done
fi
if ! printf "%s\n" "${SKIP_RSPAMD}" | grep -E '^([yY][eE][sS]|[yY])+$' >/dev/null; then
  until ping ${RSPAMDHOST} -c1 > /dev/null; do
    echo "Waiting for Rspamd..."
    sleep 1
  done
fi

echo "Bootstrap succeeded. Starting Nginx..."
nginx -g "daemon off;"
python3 /bootstrap.py

exec "$@"
@@ -3,15 +3,15 @@ FROM php:8.2-fpm-alpine3.21
LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"

# renovate: datasource=github-tags depName=krakjoe/apcu versioning=semver-coerced extractVersion=^v(?<version>.*)$
ARG APCU_PECL_VERSION=5.1.24
ARG APCU_PECL_VERSION=5.1.28
# renovate: datasource=github-tags depName=Imagick/imagick versioning=semver-coerced extractVersion=(?<version>.*)$
ARG IMAGICK_PECL_VERSION=3.8.0
ARG IMAGICK_PECL_VERSION=3.8.1
# renovate: datasource=github-tags depName=php/pecl-mail-mailparse versioning=semver-coerced extractVersion=^v(?<version>.*)$
ARG MAILPARSE_PECL_VERSION=3.1.8
ARG MAILPARSE_PECL_VERSION=3.1.9
# renovate: datasource=github-tags depName=php-memcached-dev/php-memcached versioning=semver-coerced extractVersion=^v(?<version>.*)$
ARG MEMCACHED_PECL_VERSION=3.2.0
ARG MEMCACHED_PECL_VERSION=3.4.0
# renovate: datasource=github-tags depName=phpredis/phpredis versioning=semver-coerced extractVersion=(?<version>.*)$
ARG REDIS_PECL_VERSION=6.1.0
ARG REDIS_PECL_VERSION=6.3.0
# renovate: datasource=github-tags depName=composer/composer versioning=semver-coerced extractVersion=(?<version>.*)$
ARG COMPOSER_VERSION=2.8.6

@@ -63,7 +63,6 @@ RUN apk add -U --no-cache autoconf \
    samba-client \
    zlib-dev \
    tzdata \
    python3 py3-pip \
    && pecl install APCu-${APCU_PECL_VERSION} \
    && pecl install imagick-${IMAGICK_PECL_VERSION} \
    && pecl install mailparse-${MAILPARSE_PECL_VERSION} \
@@ -73,7 +72,7 @@ RUN apk add -U --no-cache autoconf \
    && pecl clear-cache \
    && docker-php-ext-configure intl \
    && docker-php-ext-configure exif \
    && docker-php-ext-configure gd --with-freetype=/usr/include/ \
    --with-jpeg=/usr/include/ \
    --with-webp \
    --with-xpm \
@@ -108,26 +107,8 @@ RUN apk add -U --no-cache autoconf \
    pcre-dev \
    zlib-dev

RUN apk add --no-cache --virtual .build-deps \
    gcc \
    musl-dev \
    python3-dev \
    linux-headers \
    && pip install --break-system-packages psutil \
    && apk del .build-deps

RUN pip install --break-system-packages \
    mysql-connector-python \
    jinja2 \
    redis \
    dnspython

COPY data/Dockerfiles/bootstrap /bootstrap
COPY data/Dockerfiles/phpfpm/docker-entrypoint.sh /

RUN chmod +x /docker-entrypoint.sh

COPY ./docker-entrypoint.sh /

ENTRYPOINT ["/docker-entrypoint.sh"]

CMD ["php-fpm"]
@@ -1,5 +1,219 @@
|
||||
#!/bin/bash
|
||||
|
||||
function array_by_comma { local IFS=","; echo "$*"; }
|
||||
|
||||
# Wait for containers
|
||||
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
echo "Waiting for SQL..."
|
||||
sleep 2
|
||||
done
|
||||
|
||||
# Do not attempt to write to slave
|
||||
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
|
||||
REDIS_HOST=$REDIS_SLAVEOF_IP
|
||||
REDIS_PORT=$REDIS_SLAVEOF_PORT
|
||||
else
|
||||
REDIS_HOST="redis"
|
||||
REDIS_PORT="6379"
|
||||
fi
|
||||
REDIS_CMDLINE="redis-cli -h ${REDIS_HOST} -p ${REDIS_PORT} -a ${REDISPASS} --no-auth-warning"
|
||||
|
||||
until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
|
||||
echo "Waiting for Redis..."
|
||||
sleep 2
|
||||
done
|
||||
|
||||
# Set redis session store
|
||||
echo -n '
|
||||
session.save_handler = redis
|
||||
session.save_path = "tcp://'${REDIS_HOST}':'${REDIS_PORT}'?auth='${REDISPASS}'"
|
||||
' > /usr/local/etc/php/conf.d/session_store.ini
|
||||
|
||||
# Check mysql_upgrade (master and slave)
|
||||
CONTAINER_ID=
|
||||
until [[ ! -z "${CONTAINER_ID}" ]] && [[ "${CONTAINER_ID}" =~ ^[[:alnum:]]*$ ]]; do
|
||||
CONTAINER_ID=$(curl --silent --insecure https://controller.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"mysql-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
|
||||
echo "Could not get mysql-mailcow container id... trying again"
|
||||
sleep 2
|
||||
done
|
||||
echo "MySQL @ ${CONTAINER_ID}"
|
||||
SQL_LOOP_C=0
|
||||
SQL_CHANGED=0
|
||||
until [[ ${SQL_UPGRADE_STATUS} == 'success' ]]; do
|
||||
if [ ${SQL_LOOP_C} -gt 4 ]; then
|
||||
echo "Tried to upgrade MySQL and failed, giving up after ${SQL_LOOP_C} retries and starting container (oops, not good)"
|
||||
break
|
||||
fi
|
||||
SQL_FULL_UPGRADE_RETURN=$(curl --silent --insecure -XPOST https://controller.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_upgrade"}' --silent -H 'Content-type: application/json')
|
||||
SQL_UPGRADE_STATUS=$(echo ${SQL_FULL_UPGRADE_RETURN} | jq -r .type)
|
||||
SQL_LOOP_C=$((SQL_LOOP_C+1))
|
||||
echo "SQL upgrade iteration #${SQL_LOOP_C}"
|
||||
if [[ ${SQL_UPGRADE_STATUS} == 'warning' ]]; then
|
||||
SQL_CHANGED=1
|
||||
echo "MySQL applied an upgrade, debug output:"
|
||||
echo ${SQL_FULL_UPGRADE_RETURN}
|
||||
sleep 3
|
||||
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
echo "Waiting for SQL to return, please wait"
|
||||
sleep 2
|
||||
done
|
||||
continue
|
||||
elif [[ ${SQL_UPGRADE_STATUS} == 'success' ]]; then
|
||||
echo "MySQL is up-to-date - debug output:"
|
||||
echo ${SQL_FULL_UPGRADE_RETURN}
|
||||
else
|
||||
echo "No valid reponse for mysql_upgrade was received, debug output:"
|
||||
echo ${SQL_FULL_UPGRADE_RETURN}
|
||||
fi
|
||||
done
|
||||
|
||||
# doing post-installation stuff, if SQL was upgraded (master and slave)
|
||||
if [ ${SQL_CHANGED} -eq 1 ]; then
|
||||
POSTFIX=$(curl --silent --insecure https://controller.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/json | jq -r ".[] | {name: .Config.Labels[\"com.docker.compose.service\"], project: .Config.Labels[\"com.docker.compose.project\"], id: .Id}" 2> /dev/null | jq -rc "select( .name | tostring | contains(\"postfix-mailcow\")) | select( .project | tostring | contains(\"${COMPOSE_PROJECT_NAME,,}\")) | .id" 2> /dev/null)
|
||||
if [[ -z "${POSTFIX}" ]] || ! [[ "${POSTFIX}" =~ ^[[:alnum:]]*$ ]]; then
|
||||
echo "Could not determine Postfix container ID, skipping Postfix restart."
|
||||
else
|
||||
echo "Restarting Postfix"
|
||||
curl -X POST --silent --insecure https://controller.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${POSTFIX}/restart | jq -r '.msg'
|
||||
echo "Sleeping 5 seconds..."
|
||||
sleep 5
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check mysql tz import (master and slave)
|
||||
TZ_CHECK=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT CONVERT_TZ('2019-11-02 23:33:00','Europe/Berlin','UTC') AS time;" -BN 2> /dev/null)
|
||||
if [[ -z ${TZ_CHECK} ]] || [[ "${TZ_CHECK}" == "NULL" ]]; then
|
||||
SQL_FULL_TZINFO_IMPORT_RETURN=$(curl --silent --insecure -XPOST https://controller.${COMPOSE_PROJECT_NAME}_mailcow-network/containers/${CONTAINER_ID}/exec -d '{"cmd":"system", "task":"mysql_tzinfo_to_sql"}' --silent -H 'Content-type: application/json')
|
||||
echo "MySQL mysql_tzinfo_to_sql - debug output:"
|
||||
echo ${SQL_FULL_TZINFO_IMPORT_RETURN}
|
||||
fi
|
||||
|
||||
if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
echo "We are master, preparing..."
|
||||
# Set a default release format
|
||||
if [[ -z $(${REDIS_CMDLINE} --raw GET Q_RELEASE_FORMAT) ]]; then
|
||||
${REDIS_CMDLINE} --raw SET Q_RELEASE_FORMAT raw
|
||||
fi
|
||||
|
||||
# Set max age of q items - if unset
|
||||
if [[ -z $(${REDIS_CMDLINE} --raw GET Q_MAX_AGE) ]]; then
|
||||
${REDIS_CMDLINE} --raw SET Q_MAX_AGE 365
|
||||
fi
|
||||
|
||||
# Set default password policy - if unset
|
||||
if [[ -z $(${REDIS_CMDLINE} --raw HGET PASSWD_POLICY length) ]]; then
|
||||
${REDIS_CMDLINE} --raw HSET PASSWD_POLICY length 6
|
||||
${REDIS_CMDLINE} --raw HSET PASSWD_POLICY chars 0
|
||||
${REDIS_CMDLINE} --raw HSET PASSWD_POLICY special_chars 0
|
||||
${REDIS_CMDLINE} --raw HSET PASSWD_POLICY lowerupper 0
|
||||
${REDIS_CMDLINE} --raw HSET PASSWD_POLICY numbers 0
|
||||
fi
|
||||
|
||||
# Trigger db init
|
||||
echo "Running DB init..."
|
||||
php -c /usr/local/etc/php -f /web/inc/init_db.inc.php
|
||||
|
||||
# Recreating domain map
|
||||
echo "Rebuilding domain map in Redis..."
|
||||
declare -a DOMAIN_ARR
|
||||
${REDIS_CMDLINE} DEL DOMAIN_MAP > /dev/null
|
||||
while read line
|
||||
do
|
||||
DOMAIN_ARR+=("$line")
|
||||
done < <(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain FROM domain" -Bs)
|
||||
while read line
|
||||
do
|
||||
DOMAIN_ARR+=("$line")
|
||||
done < <(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT alias_domain FROM alias_domain" -Bs)
|
||||
|
||||
if [[ ! -z ${DOMAIN_ARR} ]]; then
|
||||
for domain in "${DOMAIN_ARR[@]}"; do
|
||||
${REDIS_CMDLINE} HSET DOMAIN_MAP ${domain} 1 > /dev/null
|
||||
done
|
||||
fi

  # Set API options if env vars are not empty
  if [[ ${API_ALLOW_FROM} != "invalid" ]] && [[ ! -z ${API_ALLOW_FROM} ]]; then
    IFS=',' read -r -a API_ALLOW_FROM_ARR <<< "${API_ALLOW_FROM}"
    declare -a VALIDATED_API_ALLOW_FROM_ARR
    REGEX_IP6='^([0-9a-fA-F]{0,4}:){1,7}[0-9a-fA-F]{0,4}(/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$'
    REGEX_IP4='^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+(/([0-9]|[1-2][0-9]|3[0-2]))?$'
    for IP in "${API_ALLOW_FROM_ARR[@]}"; do
      if [[ ${IP} =~ ${REGEX_IP6} ]] || [[ ${IP} =~ ${REGEX_IP4} ]]; then
        VALIDATED_API_ALLOW_FROM_ARR+=("${IP}")
      fi
    done
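    # Example: "192.168.2.0/24" and "2001:db8::1" pass these checks, while
    # "10.0.0.1/33" is rejected. Note REGEX_IP4 only validates the shape and
    # would also accept octets above 255; it is a sanity filter, not a full parser.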
    VALIDATED_IPS=$(array_by_comma ${VALIDATED_API_ALLOW_FROM_ARR[*]})
    if [[ ! -z ${VALIDATED_IPS} ]]; then
      if [[ ${API_KEY} != "invalid" ]] && [[ ! -z ${API_KEY} ]]; then
        mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
DELETE FROM api WHERE access = 'rw';
INSERT INTO api (api_key, active, allow_from, access) VALUES ("${API_KEY}", "1", "${VALIDATED_IPS}", "rw");
EOF
      fi
      if [[ ${API_KEY_READ_ONLY} != "invalid" ]] && [[ ! -z ${API_KEY_READ_ONLY} ]]; then
        mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
DELETE FROM api WHERE access = 'ro';
INSERT INTO api (api_key, active, allow_from, access) VALUES ("${API_KEY_READ_ONLY}", "1", "${VALIDATED_IPS}", "ro");
EOF
      fi
    fi
  fi

  # Create events (master only, STATUS for event on slave will be SLAVESIDE_DISABLED)
  mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} << EOF
DROP EVENT IF EXISTS clean_spamalias;
DELIMITER //
CREATE EVENT clean_spamalias
ON SCHEDULE EVERY 1 DAY DO
BEGIN
  DELETE FROM spamalias WHERE validity < UNIX_TIMESTAMP() AND permanent = 0;
END;
//
DELIMITER ;
DROP EVENT IF EXISTS clean_oauth2;
DELIMITER //
CREATE EVENT clean_oauth2
ON SCHEDULE EVERY 1 DAY DO
BEGIN
  DELETE FROM oauth_refresh_tokens WHERE expires < NOW();
  DELETE FROM oauth_access_tokens WHERE expires < NOW();
  DELETE FROM oauth_authorization_codes WHERE expires < NOW();
END;
//
DELIMITER ;
DROP EVENT IF EXISTS clean_sasl_log;
DELIMITER //
CREATE EVENT clean_sasl_log
ON SCHEDULE EVERY 1 DAY DO
BEGIN
  DELETE sasl_log.* FROM sasl_log
    LEFT JOIN (
      SELECT username, service, MAX(datetime) AS lastdate
      FROM sasl_log
      GROUP BY username, service
    ) AS last ON sasl_log.username = last.username AND sasl_log.service = last.service
  WHERE datetime < DATE_SUB(NOW(), INTERVAL 31 DAY) AND datetime < lastdate;
  DELETE FROM sasl_log
  WHERE username NOT IN (SELECT username FROM mailbox) AND
    datetime < DATE_SUB(NOW(), INTERVAL 31 DAY);
END;
//
DELIMITER ;
EOF
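  # Example: the events created above can be listed with
  #   mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SHOW EVENTS;"
  # On the master their STATUS is ENABLED; on a replica they appear as SLAVESIDE_DISABLED.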
fi

# Create dummy for custom overrides of mailcow style
[[ ! -f /web/css/build/0081-custom-mailcow.css ]] && echo '/* Autogenerated by mailcow */' > /web/css/build/0081-custom-mailcow.css

# Fix permissions for global filters
chown -R 82:82 /global_sieve/*

# Fix permissions on twig cache folder
chown -R 82:82 /web/templates/cache
# Clear cache
find /web/templates/cache/* -not -name '.gitkeep' -delete

# Run hooks
for file in /hooks/*; do
  if [ -x "${file}" ]; then
@@ -8,13 +222,4 @@ for file in /hooks/*; do
  fi
done

python3 -u /bootstrap/main.py
BOOTSTRAP_EXIT_CODE=$?

if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
  echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting PHP-FPM."
  exit $BOOTSTRAP_EXIT_CODE
fi

echo "Bootstrap succeeded. Starting PHP-FPM..."
exec "$@"

data/Dockerfiles/postfix-tlspol/Dockerfile (Normal file, 50 lines)
@@ -0,0 +1,50 @@
FROM golang:1.25-bookworm AS builder
WORKDIR /src

ENV CGO_ENABLED=0 \
    GO111MODULE=on \
    NOOPT=1 \
    VERSION=1.8.22

RUN git clone --branch v${VERSION} https://github.com/Zuplu/postfix-tlspol && \
    cd /src/postfix-tlspol && \
    scripts/build.sh build-only


FROM debian:bookworm-slim
LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"

ARG DEBIAN_FRONTEND=noninteractive
ENV LC_ALL=C

RUN apt-get update && apt-get install -y --no-install-recommends \
    ca-certificates \
    dirmngr \
    dnsutils \
    iputils-ping \
    sudo \
    supervisor \
    redis-tools \
    syslog-ng \
    syslog-ng-core \
    syslog-ng-mod-redis \
    tzdata \
    && rm -rf /var/lib/apt/lists/* \
    && touch /etc/default/locale

COPY supervisord.conf /etc/supervisor/supervisord.conf
COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
COPY postfix-tlspol.sh /opt/postfix-tlspol.sh
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
COPY docker-entrypoint.sh /docker-entrypoint.sh
COPY --from=builder /src/postfix-tlspol/build/postfix-tlspol /usr/local/bin/postfix-tlspol
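# Note: the builder stage compiles postfix-tlspol with CGO_ENABLED=0, so the
# binary copied above is built without cgo and needs no Go toolchain or shared
# Go libraries in the final debian:bookworm-slim image.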

RUN chmod +x /opt/postfix-tlspol.sh \
    /usr/local/sbin/stop-supervisor.sh \
    /docker-entrypoint.sh
RUN rm -rf /tmp/* /var/tmp/*

ENTRYPOINT ["/docker-entrypoint.sh"]

CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]

data/Dockerfiles/postfix-tlspol/docker-entrypoint.sh (Executable file, 7 lines)
@@ -0,0 +1,7 @@
#!/bin/bash

if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
fi

exec "$@"

data/Dockerfiles/postfix-tlspol/postfix-tlspol.sh (Executable file, 52 lines)
@@ -0,0 +1,52 @@
#!/bin/bash

LOGLVL=info

if [ "${DEV_MODE}" != "n" ]; then
  echo -e "\e[31mEnabling debug mode\e[0m"
  set -x
  LOGLVL=debug
fi

[[ ! -d /etc/postfix-tlspol ]] && mkdir -p /etc/postfix-tlspol
[[ ! -d /var/lib/postfix-tlspol ]] && mkdir -p /var/lib/postfix-tlspol

until dig +short mailcow.email > /dev/null; do
  echo "Waiting for DNS..."
  sleep 1
done

# Do not attempt to write to slave
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  export REDIS_CMDLINE="redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning"
else
  export REDIS_CMDLINE="redis-cli -h redis -p 6379 -a ${REDISPASS} --no-auth-warning"
fi

until [[ $(${REDIS_CMDLINE} PING) == "PONG" ]]; do
  echo "Waiting for Redis..."
  sleep 2
done

echo "Waiting for Postfix..."
until ping postfix -c1 > /dev/null; do
  sleep 1
done
echo "Postfix OK"

cat <<EOF > /etc/postfix-tlspol/config.yaml
server:
  address: 0.0.0.0:8642

  log-level: ${LOGLVL}

  prefetch: true

  cache-file: /var/lib/postfix-tlspol/cache.db

dns:
  # must support DNSSEC
  address: 127.0.0.11:53
EOF

/usr/local/bin/postfix-tlspol -config /etc/postfix-tlspol/config.yaml
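# The dns address above (127.0.0.11) is Docker's embedded DNS resolver on
# user-defined networks; postfix-tlspol relies on it to resolve the
# DNSSEC-validated records needed for DANE. Postfix itself is expected to query
# this daemon on port 8642 (a socketmap-style policy lookup; the main.cf wiring
# is outside this diff).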

data/Dockerfiles/postfix-tlspol/stop-supervisor.sh (Executable file, 8 lines)
@@ -0,0 +1,8 @@
#!/bin/bash

printf "READY\n";

while read line; do
  echo "Processing Event: $line" >&2;
  kill -3 $(cat "/var/run/supervisord.pid")
done < /dev/stdin
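# How this works: a supervisord event listener announces itself with "READY\n"
# on stdout and then receives event notifications on stdin. Subscribed to
# PROCESS_STATE_STOPPED/EXITED/FATAL (see supervisord.conf below), any dying
# program makes this script send SIGQUIT (kill -3) to supervisord, stopping the
# whole container so Docker can restart it.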

@@ -1,4 +1,5 @@
[supervisord]
pidfile=/var/run/supervisord.pid
nodaemon=true
user=root

@@ -9,19 +10,16 @@ stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autostart=true
priority=1

[program:bootstrap]
command=/docker-entrypoint.sh
[program:postfix-tlspol]
startsecs=10
autorestart=true
command=/opt/postfix-tlspol.sh
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
priority=2
startretries=10
autorestart=true
stopwaitsecs=120

[eventlistener:processes]
command=/usr/local/sbin/stop-supervisor.sh
events=PROCESS_STATE_STOPPED, PROCESS_STATE_EXITED, PROCESS_STATE_FATAL

data/Dockerfiles/postfix-tlspol/syslog-ng-redis_slave.conf (Normal file, 45 lines)
@@ -0,0 +1,45 @@
@version: 3.38
@include "scl.conf"
options {
  chain_hostnames(off);
  flush_lines(0);
  use_dns(no);
  dns_cache(no);
  use_fqdn(no);
  owner("root"); group("adm"); perm(0640);
  stats_freq(0);
  bad_hostname("^gconfd$");
};
source s_src {
  unix-stream("/dev/log");
  internal();
};
destination d_stdout { pipe("/dev/stdout"); };
destination d_redis_ui_log {
  redis(
    host("`REDIS_SLAVEOF_IP`")
    persist-name("redis1")
    port(`REDIS_SLAVEOF_PORT`)
    auth("`REDISPASS`")
    command("LPUSH" "POSTFIX_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
  );
};
filter f_mail { facility(mail); };
# start
# overriding warnings are still displayed when the entrypoint runs its initial check
# warnings logged by postfix-mailcow to syslog are hidden to reduce repeating msgs
# Some other warnings are ignored
filter f_ignore {
  not match("overriding earlier entry" value("MESSAGE"));
  not match("TLS SNI from checks.mailcow.email" value("MESSAGE"));
  not match("no SASL support" value("MESSAGE"));
  not facility (local0, local1, local2, local3, local4, local5, local6, local7);
};
# end
log {
  source(s_src);
  filter(f_ignore);
  destination(d_stdout);
  filter(f_mail);
  destination(d_redis_ui_log);
};
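# Example: each mail-facility message is pushed onto the POSTFIX_MAILLOG list
# as a single JSON document, roughly:
#   LPUSH POSTFIX_MAILLOG '{"time":"1700000000","priority":"info","program":"postfix/smtpd","message":"connect from ..."}'
# which the mailcow UI consumes for its Postfix log view.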

data/Dockerfiles/postfix-tlspol/syslog-ng.conf (Normal file, 45 lines)
@@ -0,0 +1,45 @@
@version: 3.38
@include "scl.conf"
options {
  chain_hostnames(off);
  flush_lines(0);
  use_dns(no);
  dns_cache(no);
  use_fqdn(no);
  owner("root"); group("adm"); perm(0640);
  stats_freq(0);
  bad_hostname("^gconfd$");
};
source s_src {
  unix-stream("/dev/log");
  internal();
};
destination d_stdout { pipe("/dev/stdout"); };
destination d_redis_ui_log {
  redis(
    host("redis-mailcow")
    persist-name("redis1")
    port(6379)
    auth("`REDISPASS`")
    command("LPUSH" "POSTFIX_MAILLOG" "$(format-json time=\"$S_UNIXTIME\" priority=\"$PRIORITY\" program=\"$PROGRAM\" message=\"$MESSAGE\")\n")
  );
};
filter f_mail { facility(mail); };
# start
# overriding warnings are still displayed when the entrypoint runs its initial check
# warnings logged by postfix-mailcow to syslog are hidden to reduce repeating msgs
# Some other warnings are ignored
filter f_ignore {
  not match("overriding earlier entry" value("MESSAGE"));
  not match("TLS SNI from checks.mailcow.email" value("MESSAGE"));
  not match("no SASL support" value("MESSAGE"));
  not facility (local0, local1, local2, local3, local4, local5, local6, local7);
};
# end
log {
  source(s_src);
  filter(f_ignore);
  destination(d_stdout);
  filter(f_mail);
  destination(d_redis_ui_log);
};

@@ -1,9 +1,9 @@
FROM debian:bookworm-slim

LABEL maintainer = "The Infrastructure Company GmbH <info@servercow.de>"
LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"

ARG DEBIAN_FRONTEND=noninteractive
ENV LC_ALL C
ENV LC_ALL=C

RUN dpkg-divert --local --rename --add /sbin/initctl \
  && ln -sf /bin/true /sbin/initctl \
@@ -34,31 +34,23 @@ RUN groupadd -g 102 postfix \
  syslog-ng-core \
  syslog-ng-mod-redis \
  tzdata \
  python3 python3-pip \
  && rm -rf /var/lib/apt/lists/* \
  && touch /etc/default/locale \
  && printf '#!/bin/bash\n/usr/sbin/postconf -c /opt/postfix/conf "$@"' > /usr/local/sbin/postconf \
  && chmod +x /usr/local/sbin/postconf

RUN pip install --break-system-packages \
  mysql-connector-python \
  jinja2 \
  redis \
  dnspython \
  psutil
COPY supervisord.conf /etc/supervisor/supervisord.conf
COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
COPY postfix.sh /opt/postfix.sh
COPY rspamd-pipe-ham /usr/local/bin/rspamd-pipe-ham
COPY rspamd-pipe-spam /usr/local/bin/rspamd-pipe-spam
COPY whitelist_forwardinghosts.sh /usr/local/bin/whitelist_forwardinghosts.sh
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
COPY docker-entrypoint.sh /docker-entrypoint.sh

COPY data/Dockerfiles/bootstrap /bootstrap
COPY data/Dockerfiles/postfix/supervisord.conf /etc/supervisor/supervisord.conf
COPY data/Dockerfiles/postfix/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
COPY data/Dockerfiles/postfix/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
COPY data/Dockerfiles/postfix/rspamd-pipe-ham /usr/local/bin/rspamd-pipe-ham
COPY data/Dockerfiles/postfix/rspamd-pipe-spam /usr/local/bin/rspamd-pipe-spam
COPY data/Dockerfiles/postfix/whitelist_forwardinghosts.sh /usr/local/bin/whitelist_forwardinghosts.sh
COPY data/Dockerfiles/postfix/stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
COPY data/Dockerfiles/postfix/docker-entrypoint.sh /docker-entrypoint.sh

RUN chmod +x /usr/local/bin/rspamd-pipe-ham \
  /docker-entrypoint.sh \
RUN chmod +x /opt/postfix.sh \
  /usr/local/bin/rspamd-pipe-ham \
  /usr/local/bin/rspamd-pipe-spam \
  /usr/local/bin/whitelist_forwardinghosts.sh \
  /usr/local/sbin/stop-supervisor.sh
@@ -66,5 +58,6 @@ RUN rm -rf /tmp/* /var/tmp/*

EXPOSE 588

ENTRYPOINT ["/docker-entrypoint.sh"]

CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]

@@ -8,12 +8,8 @@ for file in /hooks/*; do
  fi
done

python3 -u /bootstrap/main.py
BOOTSTRAP_EXIT_CODE=$?

if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
  echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting Postfix."
  exit $BOOTSTRAP_EXIT_CODE
if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  cp /etc/syslog-ng/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng.conf
fi

# Fix OpenSSL 3.X TLS1.0, 1.1 support (https://community.mailcow.email/d/4062-hi-all/20)
@@ -25,16 +21,6 @@ if grep -qE '\!SSLv2|\!SSLv3|>=TLSv1(\.[0-1])?$' /opt/postfix/conf/main.cf /opt/
  echo "[tls_system_default]" >> /etc/ssl/openssl.cnf
  echo "MinProtocol = TLSv1" >> /etc/ssl/openssl.cnf
  echo "CipherString = DEFAULT@SECLEVEL=0" >> /etc/ssl/openssl.cnf
  fi
fi

# Start Postfix
postconf -c /opt/postfix/conf > /dev/null
if [[ $? != 0 ]]; then
  echo "Postfix configuration error, refusing to start."
  exit 1
else
  echo "Bootstrap succeeded. Starting Postfix..."
  postfix -c /opt/postfix/conf start
  sleep 126144000
fi
exec "$@"

data/Dockerfiles/postfix/postfix.sh (Executable file, 530 lines)
@@ -0,0 +1,530 @@
#!/bin/bash

trap "postfix stop" EXIT

[[ ! -d /opt/postfix/conf/sql/ ]] && mkdir -p /opt/postfix/conf/sql/

# Wait for MySQL to warm up
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
  echo "Waiting for database to come up..."
  sleep 2
done

until dig +short mailcow.email > /dev/null; do
  echo "Waiting for DNS..."
  sleep 1
done

cat <<EOF > /etc/aliases
# Autogenerated by mailcow
null: /dev/null
watchdog: /dev/null
ham: "|/usr/local/bin/rspamd-pipe-ham"
spam: "|/usr/local/bin/rspamd-pipe-spam"
EOF
newaliases;

# create sni configuration
if [[ "${SKIP_LETS_ENCRYPT}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  echo -n "" > /opt/postfix/conf/sni.map
else
  echo -n "" > /opt/postfix/conf/sni.map;
  for cert_dir in /etc/ssl/mail/*/ ; do
    if [[ ! -f ${cert_dir}domains ]] || [[ ! -f ${cert_dir}cert.pem ]] || [[ ! -f ${cert_dir}key.pem ]]; then
      continue;
    fi
    IFS=" " read -r -a domains <<< "$(cat "${cert_dir}domains")"
    for domain in "${domains[@]}"; do
      echo -n "${domain} ${cert_dir}key.pem ${cert_dir}cert.pem" >> /opt/postfix/conf/sni.map;
      echo "" >> /opt/postfix/conf/sni.map;
    done
  done
fi
postmap -F hash:/opt/postfix/conf/sni.map;
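# Example (hypothetical domain): a generated sni.map entry looks like
#   mail.example.org /etc/ssl/mail/mail.example.org/key.pem /etc/ssl/mail/mail.example.org/cert.pem
# and postmap -F compiles it into sni.map.db with the key/cert file contents
# embedded, so Postfix can present the matching certificate per TLS SNI name.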

cat <<EOF > /opt/postfix/conf/sql/mysql_relay_ne.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT IF(EXISTS(SELECT address, domain FROM alias
  WHERE address = '%s'
  AND domain IN (
    SELECT domain FROM domain
    WHERE backupmx = '1'
    AND relay_all_recipients = '1'
    AND relay_unknown_only = '1')
  ), 'lmtp:inet:dovecot:24', NULL) AS 'transport'
EOF
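# In these Postfix mysql: table files, Postfix expands %s to the whole lookup
# key (here the recipient address), %u to its local part and %d to its domain
# before the query runs; continuation lines of a query must start with whitespace.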

cat <<EOF > /opt/postfix/conf/sql/mysql_relay_recipient_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT DISTINCT
  CASE WHEN '%d' IN (
    SELECT domain FROM domain
    WHERE relay_all_recipients=1
    AND domain='%d'
    AND backupmx=1
  )
  THEN '%s' ELSE (
    SELECT goto FROM alias WHERE address='%s' AND active='1'
  )
  END AS result;
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_tls_policy_override_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT CONCAT(policy, ' ', parameters) AS tls_policy FROM tls_policy_override WHERE active = '1' AND dest = '%s'
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_tls_enforce_in_policy.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT IF(EXISTS(
  SELECT 'TLS_ACTIVE' FROM alias
  LEFT OUTER JOIN mailbox ON mailbox.username = alias.goto
  WHERE (address='%s'
    OR address IN (
      SELECT CONCAT('%u', '@', target_domain) FROM alias_domain
      WHERE alias_domain='%d'
    )
  ) AND JSON_UNQUOTE(JSON_VALUE(attributes, '$.tls_enforce_in')) = '1' AND mailbox.active = '1'
  ), 'reject_plaintext_session', NULL) AS 'tls_enforce_in';
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_sender_dependent_default_transport_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT GROUP_CONCAT(transport SEPARATOR '') AS transport_maps
  FROM (
    SELECT IF(EXISTS(SELECT 'smtp_type' FROM alias
      LEFT OUTER JOIN mailbox ON mailbox.username = alias.goto
      WHERE (address = '%s'
        OR address IN (
          SELECT CONCAT('%u', '@', target_domain) FROM alias_domain
          WHERE alias_domain = '%d'
        )
      )
      AND JSON_UNQUOTE(JSON_VALUE(attributes, '$.tls_enforce_out')) = '1'
      AND mailbox.active = '1'
    ), 'smtp_enforced_tls:', 'smtp:') AS 'transport'
    UNION ALL
    SELECT COALESCE(
      (SELECT hostname FROM relayhosts
        LEFT OUTER JOIN mailbox ON JSON_UNQUOTE(JSON_VALUE(mailbox.attributes, '$.relayhost')) = relayhosts.id
        WHERE relayhosts.active = '1'
        AND (
          mailbox.username IN (SELECT alias.goto from alias
            JOIN mailbox ON mailbox.username = alias.goto
            WHERE alias.active = '1'
            AND alias.address = '%s'
            AND alias.address NOT LIKE '@%%'
          )
        )
      ),
      (SELECT hostname FROM relayhosts
        LEFT OUTER JOIN domain ON domain.relayhost = relayhosts.id
        WHERE relayhosts.active = '1'
        AND (domain.domain = '%d'
          OR domain.domain IN (
            SELECT target_domain FROM alias_domain
            WHERE alias_domain = '%d'
          )
        )
      )
    )
  ) AS transport_view;
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_transport_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT CONCAT('smtp_via_transport_maps:', nexthop) AS transport FROM transports
  WHERE active = '1'
  AND destination = '%s';
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_resource_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT 'null@localhost' FROM mailbox
  WHERE kind REGEXP 'location|thing|group' AND username = '%s';
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_sasl_passwd_maps_sender_dependent.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT CONCAT_WS(':', username, password) AS auth_data FROM relayhosts
  WHERE id IN (
    SELECT COALESCE(
      (SELECT id FROM relayhosts
        LEFT OUTER JOIN domain ON domain.relayhost = relayhosts.id
        WHERE relayhosts.active = '1'
        AND (domain.domain = '%d'
          OR domain.domain IN (
            SELECT target_domain FROM alias_domain
            WHERE alias_domain = '%d'
          )
        )
      ),
      (SELECT id FROM relayhosts
        LEFT OUTER JOIN mailbox ON JSON_UNQUOTE(JSON_VALUE(mailbox.attributes, '$.relayhost')) = relayhosts.id
        WHERE relayhosts.active = '1'
        AND (
          mailbox.username IN (
            SELECT alias.goto from alias
            JOIN mailbox ON mailbox.username = alias.goto
            WHERE alias.active = '1'
            AND alias.address = '%s'
            AND alias.address NOT LIKE '@%%'
          )
        )
      )
    )
  )
  AND active = '1'
  AND username != '';
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_sasl_passwd_maps_transport_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT CONCAT_WS(':', username, password) AS auth_data FROM transports
  WHERE nexthop = '%s'
  AND active = '1'
  AND username != ''
  LIMIT 1;
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_alias_domain_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT username FROM mailbox, alias_domain
  WHERE alias_domain.alias_domain = '%d'
  AND mailbox.username = CONCAT('%u', '@', alias_domain.target_domain)
  AND (mailbox.active = '1' OR mailbox.active = '2')
  AND alias_domain.active='1'
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_alias_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT goto FROM alias
  WHERE address='%s'
  AND (active='1' OR active='2');
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_recipient_bcc_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT bcc_dest FROM bcc_maps
  WHERE local_dest='%s'
  AND type='rcpt'
  AND active='1';
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_sender_bcc_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT bcc_dest FROM bcc_maps
  WHERE local_dest='%s'
  AND type='sender'
  AND active='1';
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_recipient_canonical_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT new_dest FROM recipient_maps
  WHERE old_dest='%s'
  AND active='1';
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_domains_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT alias_domain from alias_domain WHERE alias_domain='%s' AND active='1'
  UNION
  SELECT domain FROM domain
  WHERE domain='%s'
  AND active = '1'
  AND backupmx = '0'
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_mailbox_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT CONCAT(JSON_UNQUOTE(JSON_VALUE(attributes, '$.mailbox_format')), mailbox_path_prefix, '%d/%u/') FROM mailbox WHERE username='%s' AND (active = '1' OR active = '2')
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_relay_domain_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT domain FROM domain WHERE domain='%s' AND backupmx = '1' AND active = '1'
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_sender_acl.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
# First select queries domain and alias_domain to determine if domains are active.
query = SELECT goto FROM alias
  WHERE id IN (
    SELECT COALESCE (
      (
        SELECT id FROM alias
        WHERE address='%s'
        AND (active='1' OR active='2')
        AND sender_allowed='1'
      ), (
        SELECT id FROM alias
        WHERE address='@%d'
        AND (active='1' OR active='2')
        AND sender_allowed='1'
      )
    )
  )
  AND active='1'
  AND sender_allowed='1'
  AND (domain IN
    (SELECT domain FROM domain
      WHERE domain='%d'
      AND active='1')
    OR domain in (
      SELECT alias_domain FROM alias_domain
      WHERE alias_domain='%d'
      AND active='1'
    )
  )
  UNION
  SELECT logged_in_as FROM sender_acl
  WHERE send_as='@%d'
    OR send_as='%s'
    OR send_as='*'
    OR send_as IN (
      SELECT CONCAT('@',target_domain) FROM alias_domain
      WHERE alias_domain = '%d')
    OR send_as IN (
      SELECT CONCAT('%u','@',target_domain) FROM alias_domain
      WHERE alias_domain = '%d')
    AND logged_in_as NOT IN (
      SELECT goto FROM alias
      WHERE address='%s')
  UNION
  SELECT username FROM mailbox, alias_domain
  WHERE alias_domain.alias_domain = '%d'
  AND mailbox.username = CONCAT('%u','@',alias_domain.target_domain)
  AND (mailbox.active = '1' OR mailbox.active ='2')
  AND alias_domain.active='1';
EOF

# MX based routing
cat <<EOF > /opt/postfix/conf/sql/mysql_mbr_access_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT CONCAT('FILTER smtp_via_transport_maps:', nexthop) as transport FROM transports
  WHERE '%s' REGEXP destination
  AND active='1'
  AND is_mx_based='1';
EOF

cat <<EOF > /opt/postfix/conf/sql/mysql_virtual_spamalias_maps.cf
# Autogenerated by mailcow
user = ${DBUSER}
password = ${DBPASS}
hosts = unix:/var/run/mysqld/mysqld.sock
dbname = ${DBNAME}
query = SELECT goto FROM spamalias
  WHERE address='%s'
  AND (validity >= UNIX_TIMESTAMP() OR permanent != 0)
EOF

if [ ! -f /opt/postfix/conf/dns_blocklists.cf ]; then
  cat <<EOF > /opt/postfix/conf/dns_blocklists.cf
# This file can be edited.
# Delete this file and restart postfix container to revert any changes.
postscreen_dnsbl_sites = wl.mailspike.net=127.0.0.[18;19;20]*-2
  hostkarma.junkemailfilter.com=127.0.0.1*-2
  list.dnswl.org=127.0.[0..255].0*-2
  list.dnswl.org=127.0.[0..255].1*-4
  list.dnswl.org=127.0.[0..255].2*-6
  list.dnswl.org=127.0.[0..255].3*-8
  bl.spamcop.net*2
  bl.suomispam.net*2
  hostkarma.junkemailfilter.com=127.0.0.2*3
  hostkarma.junkemailfilter.com=127.0.0.4*2
  hostkarma.junkemailfilter.com=127.0.1.2*1
  backscatter.spameatingmonkey.net*2
  bl.ipv6.spameatingmonkey.net*2
  bl.spameatingmonkey.net*2
  b.barracudacentral.org=127.0.0.2*7
  bl.mailspike.net=127.0.0.2*5
  bl.mailspike.net=127.0.0.[10;11;12]*4
EOF
fi
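# Syntax reminder for the list above: "=127.0.0.x" restricts which DNSBL return
# codes count as a hit, "*N" is the weight postscreen adds per hit, and negative
# weights (the mailspike/hostkarma/dnswl entries) act as allowlist credit
# against postscreen_dnsbl_threshold.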

# Remove discontinued DNSBLs from existing dns_blocklists.cf
sed -i '/ix\.dnsbl\.manitu\.net\*2/d' /opt/postfix/conf/dns_blocklists.cf # Nixspam

DNSBL_CONFIG=$(grep -v '^#' /opt/postfix/conf/dns_blocklists.cf | grep '\S')

if [ ! -z "$DNSBL_CONFIG" ]; then
  echo -e "\e[33mChecking if the ASN of your IP is on the Spamhaus Bad ASN List...\e[0m"
  if [ -n "$SPAMHAUS_DQS_KEY" ]; then
    echo -e "\e[32mDetected SPAMHAUS_DQS_KEY variable from mailcow.conf...\e[0m"
    echo -e "\e[33mUsing DQS Blocklists from Spamhaus!\e[0m"
    SPAMHAUS_DNSBL_CONFIG=$(cat <<EOF
  ${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.[4..7]*6
  ${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.[10;11]*8
  ${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.3*4
  ${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net=127.0.0.2*3
postscreen_dnsbl_reply_map = texthash:/opt/postfix/conf/dnsbl_reply.map
EOF

cat <<EOF > /opt/postfix/conf/dnsbl_reply.map
# Autogenerated by mailcow, using Spamhaus DQS reply domains
${SPAMHAUS_DQS_KEY}.sbl.dq.spamhaus.net sbl.spamhaus.org
${SPAMHAUS_DQS_KEY}.xbl.dq.spamhaus.net xbl.spamhaus.org
${SPAMHAUS_DQS_KEY}.pbl.dq.spamhaus.net pbl.spamhaus.org
${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net zen.spamhaus.org
${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net dbl.spamhaus.org
${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net zrd.spamhaus.org
EOF
    )
  else
    if [ -f "/opt/postfix/conf/dnsbl_reply.map" ]; then
      rm /opt/postfix/conf/dnsbl_reply.map
    fi
    response=$(curl --connect-timeout 15 --max-time 30 -s -o /dev/null -w "%{http_code}" "https://asn-check.mailcow.email")
    if [ "$response" -eq 503 ]; then
      echo -e "\e[31mThe AS of your IP is listed as a banned AS by Spamhaus!\e[0m"
      echo -e "\e[33mNo SPAMHAUS_DQS_KEY found... Skipping Spamhaus blocklists entirely!\e[0m"
      SPAMHAUS_DNSBL_CONFIG=""
    elif [ "$response" -eq 200 ]; then
      echo -e "\e[32mThe AS of your IP is NOT listed as a banned AS by Spamhaus!\e[0m"
      echo -e "\e[33mUsing the open Spamhaus blocklists.\e[0m"
      SPAMHAUS_DNSBL_CONFIG=$(cat <<EOF
  zen.spamhaus.org=127.0.0.[10;11]*8
  zen.spamhaus.org=127.0.0.[4..7]*6
  zen.spamhaus.org=127.0.0.3*4
  zen.spamhaus.org=127.0.0.2*3
EOF
    )
    else
      echo -e "\e[31mWe couldn't determine your AS... (maybe a DNS/network issue?) Response code: $response\e[0m"
      echo -e "\e[33mDeactivating Spamhaus DNS blocklists to be on the safe side!\e[0m"
      SPAMHAUS_DNSBL_CONFIG=""
    fi
  fi
fi

# Reset main.cf
sed -i '/Overrides/q' /opt/postfix/conf/main.cf
echo >> /opt/postfix/conf/main.cf
# Append postscreen dnsbl sites to main.cf
if [ ! -z "$DNSBL_CONFIG" ]; then
  echo -e "${DNSBL_CONFIG}\n${SPAMHAUS_DNSBL_CONFIG}" >> /opt/postfix/conf/main.cf
fi
# Append user overrides
echo -e "\n# User Overrides" >> /opt/postfix/conf/main.cf
touch /opt/postfix/conf/extra.cf
sed -i '/\$myhostname/! { /myhostname/d }' /opt/postfix/conf/extra.cf
echo -e "myhostname = ${MAILCOW_HOSTNAME}\n$(cat /opt/postfix/conf/extra.cf)" > /opt/postfix/conf/extra.cf
cat /opt/postfix/conf/extra.cf >> /opt/postfix/conf/main.cf

if [ ! -f /opt/postfix/conf/custom_transport.pcre ]; then
  echo "Creating dummy custom_transport.pcre"
  touch /opt/postfix/conf/custom_transport.pcre
fi

if [[ ! -f /opt/postfix/conf/custom_postscreen_whitelist.cidr ]]; then
  echo "Creating dummy custom_postscreen_whitelist.cidr"
  cat <<EOF > /opt/postfix/conf/custom_postscreen_whitelist.cidr
# Autogenerated by mailcow
# Rules are evaluated in the order as specified.
# Blacklist 192.168.* except 192.168.0.1.
# 192.168.0.1 permit
# 192.168.0.0/16 reject
EOF
fi

# Fix Postfix permissions
chown -R root:postfix /opt/postfix/conf/sql/ /opt/postfix/conf/custom_transport.pcre
chmod 640 /opt/postfix/conf/sql/*.cf /opt/postfix/conf/custom_transport.pcre
chgrp -R postdrop /var/spool/postfix/public
chgrp -R postdrop /var/spool/postfix/maildrop
postfix set-permissions

# Check if there is a leftover PID from a crashed Postfix container before starting a new one
if [ -e /var/spool/postfix/pid/master.pid ]; then
  rm -rf /var/spool/postfix/pid/master.pid
fi

# Check Postfix configuration
postconf -c /opt/postfix/conf > /dev/null

if [[ $? != 0 ]]; then
  echo "Postfix configuration error, refusing to start."
  exit 1
else
  postfix -c /opt/postfix/conf start
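  # Postfix daemonizes itself, so the long sleep below (126144000 s, about four
  # years) only keeps this supervised script alive; the EXIT trap at the top of
  # the file runs "postfix stop" when the script is terminated.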
  sleep 126144000
fi

@@ -11,8 +11,8 @@ stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autostart=true

[program:bootstrap]
command=/docker-entrypoint.sh
[program:postfix]
command=/opt/postfix.sh
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr

@@ -1,9 +1,9 @@
FROM debian:bookworm-slim
FROM debian:trixie-slim
LABEL maintainer="The Infrastructure Company GmbH <info@servercow.de>"

ARG DEBIAN_FRONTEND=noninteractive
ARG RSPAMD_VER=rspamd_3.11.1-1~ab0b44951
ARG CODENAME=bookworm
ARG RSPAMD_VER=rspamd_3.14.2-82~90302bc
ARG CODENAME=trixie
ENV LC_ALL=C

RUN apt-get update && apt-get install -y --no-install-recommends \
@@ -18,7 +18,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
  procps \
  nano \
  lua-cjson \
  python3 python3-pip \
  && arch=$(arch | sed s/aarch64/arm64/ | sed s/x86_64/amd64/) \
  && wget -P /tmp https://rspamd.com/apt-stable/pool/main/r/rspamd/${RSPAMD_VER}~${CODENAME}_${arch}.deb \
  && apt install -y /tmp/${RSPAMD_VER}~${CODENAME}_${arch}.deb \
@@ -30,20 +29,12 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
  && echo 'alias ll="ls -la --color"' >> ~/.bashrc \
  && sed -i 's/#analysis_keyword_table > 0/analysis_cat_table.macro_exist == "M"/g' /usr/share/rspamd/lualib/lua_scanners/oletools.lua

RUN pip install --break-system-packages \
  mysql-connector-python \
  jinja2 \
  redis \
  dnspython \
  psutil

COPY data/Dockerfiles/bootstrap /bootstrap
COPY data/Dockerfiles/rspamd/settings.conf /etc/rspamd/settings.conf
COPY data/Dockerfiles/rspamd/set_worker_password.sh /set_worker_password.sh
COPY data/Dockerfiles/rspamd/docker-entrypoint.sh /docker-entrypoint.sh
COPY settings.conf /etc/rspamd/settings.conf
COPY set_worker_password.sh /set_worker_password.sh
COPY docker-entrypoint.sh /docker-entrypoint.sh

ENTRYPOINT ["/docker-entrypoint.sh"]

STOPSIGNAL SIGTERM
ENTRYPOINT ["/docker-entrypoint.sh"]

CMD ["/usr/bin/rspamd", "-f", "-u", "_rspamd", "-g", "_rspamd"]

@@ -1,5 +1,146 @@
#!/bin/bash

until nc -z phpfpm 9001; do
  echo "Waiting for PHP on port 9001..."
  sleep 3
done

until nc -z phpfpm 9002; do
  echo "Waiting for PHP on port 9002..."
  sleep 3
done

mkdir -p /etc/rspamd/plugins.d \
  /etc/rspamd/custom

touch /etc/rspamd/rspamd.conf.local \
  /etc/rspamd/rspamd.conf.override

chmod 755 /var/lib/rspamd

[[ ! -f /etc/rspamd/override.d/worker-controller-password.inc ]] && echo '# Autogenerated by mailcow' > /etc/rspamd/override.d/worker-controller-password.inc

echo ${IPV4_NETWORK}.0/24 > /etc/rspamd/custom/mailcow_networks.map
echo ${IPV6_NETWORK} >> /etc/rspamd/custom/mailcow_networks.map

DOVECOT_V4=
DOVECOT_V6=
until [[ ! -z ${DOVECOT_V4} ]]; do
  DOVECOT_V4=$(dig a dovecot +short)
  DOVECOT_V6=$(dig aaaa dovecot +short)
  [[ ! -z ${DOVECOT_V4} ]] && break;
  echo "Waiting for Dovecot..."
  sleep 3
done
echo ${DOVECOT_V4}/32 > /etc/rspamd/custom/dovecot_trusted.map
if [[ ! -z ${DOVECOT_V6} ]]; then
  echo ${DOVECOT_V6}/128 >> /etc/rspamd/custom/dovecot_trusted.map
fi

RSPAMD_V4=
RSPAMD_V6=
until [[ ! -z ${RSPAMD_V4} ]]; do
  RSPAMD_V4=$(dig a rspamd +short)
  RSPAMD_V6=$(dig aaaa rspamd +short)
  [[ ! -z ${RSPAMD_V4} ]] && break;
  echo "Waiting for Rspamd..."
  sleep 3
done
echo ${RSPAMD_V4}/32 > /etc/rspamd/custom/rspamd_trusted.map
if [[ ! -z ${RSPAMD_V6} ]]; then
  echo ${RSPAMD_V6}/128 >> /etc/rspamd/custom/rspamd_trusted.map
fi

if [[ ! -z ${REDIS_SLAVEOF_IP} ]]; then
  cat <<EOF > /etc/rspamd/local.d/redis.conf
read_servers = "redis:6379";
write_servers = "${REDIS_SLAVEOF_IP}:${REDIS_SLAVEOF_PORT}";
password = "${REDISPASS}";
timeout = 10;
EOF
  until [[ $(redis-cli -h redis-mailcow -a ${REDISPASS} --no-auth-warning PING) == "PONG" ]]; do
    echo "Waiting for Redis @redis-mailcow..."
    sleep 2
  done
  until [[ $(redis-cli -h ${REDIS_SLAVEOF_IP} -p ${REDIS_SLAVEOF_PORT} -a ${REDISPASS} --no-auth-warning PING) == "PONG" ]]; do
    echo "Waiting for Redis @${REDIS_SLAVEOF_IP}..."
    sleep 2
  done
  redis-cli -h redis-mailcow -a ${REDISPASS} --no-auth-warning SLAVEOF ${REDIS_SLAVEOF_IP} ${REDIS_SLAVEOF_PORT}
else
  cat <<EOF > /etc/rspamd/local.d/redis.conf
servers = "redis:6379";
password = "${REDISPASS}";
timeout = 10;
EOF
  until [[ $(redis-cli -h redis-mailcow -a ${REDISPASS} --no-auth-warning PING) == "PONG" ]]; do
    echo "Waiting for Redis slave..."
    sleep 2
  done
  redis-cli -h redis-mailcow -a ${REDISPASS} --no-auth-warning SLAVEOF NO ONE
fi
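# In slave mode Rspamd keeps reading from the local replica ("redis:6379") but
# directs writes to the master given by REDIS_SLAVEOF_IP/PORT, and the local
# Redis is demoted to a replica of that master via SLAVEOF.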

if [[ "${SKIP_OLEFY}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
  if [[ -f /etc/rspamd/local.d/external_services.conf ]]; then
    rm /etc/rspamd/local.d/external_services.conf
  fi
else
  if [[ ! -f /etc/rspamd/local.d/external_services.conf ]]; then
    cat <<EOF > /etc/rspamd/local.d/external_services.conf
oletools {
  # default olefy settings
  servers = "olefy:10055";
  # needs to be set explicitly for Rspamd < 1.9.5
  scan_mime_parts = true;
  # mime-part regex matching in content-type or filename
  # block all macros
  extended = true;
  max_size = 3145728;
  timeout = 20.0;
  retransmits = 1;
}
EOF
  fi
fi

# Provide additional lua modules
ln -s /usr/lib/$(uname -m)-linux-gnu/liblua5.1-cjson.so.0.0.0 /usr/lib/rspamd/cjson.so

chown -R _rspamd:_rspamd /var/lib/rspamd \
  /etc/rspamd/local.d \
  /etc/rspamd/override.d \
  /etc/rspamd/rspamd.conf.local \
  /etc/rspamd/rspamd.conf.override \
  /etc/rspamd/plugins.d

# Fix missing default global maps, if any
# These exist in the mailcow UI and should not be removed
touch /etc/rspamd/custom/global_mime_from_blacklist.map \
  /etc/rspamd/custom/global_rcpt_blacklist.map \
  /etc/rspamd/custom/global_smtp_from_blacklist.map \
  /etc/rspamd/custom/global_mime_from_whitelist.map \
  /etc/rspamd/custom/global_rcpt_whitelist.map \
  /etc/rspamd/custom/global_smtp_from_whitelist.map \
  /etc/rspamd/custom/bad_languages.map \
  /etc/rspamd/custom/sa-rules \
  /etc/rspamd/custom/dovecot_trusted.map \
  /etc/rspamd/custom/rspamd_trusted.map \
  /etc/rspamd/custom/mailcow_networks.map \
  /etc/rspamd/custom/ip_wl.map \
  /etc/rspamd/custom/fishy_tlds.map \
  /etc/rspamd/custom/bad_words.map \
  /etc/rspamd/custom/bad_asn.map \
  /etc/rspamd/custom/bad_words_de.map \
  /etc/rspamd/custom/bulk_header.map \
  /etc/rspamd/custom/bad_header.map

# www-data (82) group needs to write to these files
chown _rspamd:_rspamd /etc/rspamd/custom/
chmod 0755 /etc/rspamd/custom/.
chown -R 82:82 /etc/rspamd/custom/*
chmod -R 644 /etc/rspamd/custom/*

# Run hooks
for file in /hooks/*; do
  if [ -x "${file}" ]; then
@@ -8,13 +149,190 @@ for file in /hooks/*; do
  fi
done

python3 -u /bootstrap/main.py
BOOTSTRAP_EXIT_CODE=$?
# If a DQS key is set in mailcow.conf, add the Spamhaus DQS RBLs
if [[ ! -z ${SPAMHAUS_DQS_KEY} ]]; then
  cat <<EOF > /etc/rspamd/custom/dqs-rbl.conf
# Autogenerated by mailcow. DO NOT TOUCH!
spamhaus {
  rbl = "${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net";
  from = false;
}
spamhaus_from {
  from = true;
  received = false;
  rbl = "${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net";
  returncodes {
    SPAMHAUS_ZEN = [ "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5", "127.0.0.6", "127.0.0.7", "127.0.0.9", "127.0.0.10", "127.0.0.11" ];
  }
}
spamhaus_authbl_received {
  # Check if the sender client is listed in AuthBL (AuthBL is *not* part of ZEN)
  rbl = "${SPAMHAUS_DQS_KEY}.authbl.dq.spamhaus.net";
  from = false;
  received = true;
  ipv6 = true;
  returncodes {
    SH_AUTHBL_RECEIVED = "127.0.0.20"
  }
}
spamhaus_dbl {
  # Add checks on the HELO string
  rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
  helo = true;
  rdns = true;
  dkim = true;
  disable_monitoring = true;
  returncodes {
    RBL_DBL_SPAM = "127.0.1.2";
    RBL_DBL_PHISH = "127.0.1.4";
    RBL_DBL_MALWARE = "127.0.1.5";
    RBL_DBL_BOTNET = "127.0.1.6";
    RBL_DBL_ABUSED_SPAM = "127.0.1.102";
    RBL_DBL_ABUSED_PHISH = "127.0.1.104";
    RBL_DBL_ABUSED_MALWARE = "127.0.1.105";
    RBL_DBL_ABUSED_BOTNET = "127.0.1.106";
    RBL_DBL_DONT_QUERY_IPS = "127.0.1.255";
  }
}
spamhaus_dbl_fullurls {
  ignore_defaults = true;
  no_ip = true;
  rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
  selector = 'urls:get_host';
  disable_monitoring = true;
  returncodes {
    DBLABUSED_SPAM_FULLURLS = "127.0.1.102";
    DBLABUSED_PHISH_FULLURLS = "127.0.1.104";
    DBLABUSED_MALWARE_FULLURLS = "127.0.1.105";
    DBLABUSED_BOTNET_FULLURLS = "127.0.1.106";
  }
}
spamhaus_zrd {
  # Add checks on the HELO string also for DQS
  rbl = "${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net";
  helo = true;
  rdns = true;
  dkim = true;
  disable_monitoring = true;
  returncodes {
    RBL_ZRD_VERY_FRESH_DOMAIN = ["127.0.2.2", "127.0.2.3", "127.0.2.4"];
    RBL_ZRD_FRESH_DOMAIN = [
      "127.0.2.5", "127.0.2.6", "127.0.2.7", "127.0.2.8", "127.0.2.9", "127.0.2.10", "127.0.2.11", "127.0.2.12", "127.0.2.13", "127.0.2.14", "127.0.2.15", "127.0.2.16", "127.0.2.17", "127.0.2.18", "127.0.2.19", "127.0.2.20", "127.0.2.21", "127.0.2.22", "127.0.2.23", "127.0.2.24"
    ];
    RBL_ZRD_DONT_QUERY_IPS = "127.0.2.255";
  }
}
"SPAMHAUS_ZEN_URIBL" {
  enabled = true;
  rbl = "${SPAMHAUS_DQS_KEY}.zen.dq.spamhaus.net";
  resolve_ip = true;
  checks = ['urls'];
  replyto = true;
  emails = true;
  ipv4 = true;
  ipv6 = true;
  emails_domainonly = true;
  returncodes {
    URIBL_SBL = "127.0.0.2";
    URIBL_SBL_CSS = "127.0.0.3";
    URIBL_XBL = ["127.0.0.4", "127.0.0.5", "127.0.0.6", "127.0.0.7"];
    URIBL_PBL = ["127.0.0.10", "127.0.0.11"];
    URIBL_DROP = "127.0.0.9";
  }
}
SH_EMAIL_DBL {
  ignore_defaults = true;
  replyto = true;
  emails_domainonly = true;
  disable_monitoring = true;
  rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
  returncodes = {
    SH_EMAIL_DBL = [
      "127.0.1.2",
      "127.0.1.4",
      "127.0.1.5",
      "127.0.1.6"
    ];
    SH_EMAIL_DBL_ABUSED = [
      "127.0.1.102",
      "127.0.1.104",
      "127.0.1.105",
      "127.0.1.106"
    ];
    SH_EMAIL_DBL_DONT_QUERY_IPS = [ "127.0.1.255" ];
  }
}
SH_EMAIL_ZRD {
  ignore_defaults = true;
  replyto = true;
  emails_domainonly = true;
  disable_monitoring = true;
  rbl = "${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net";
  returncodes = {
    SH_EMAIL_ZRD_VERY_FRESH_DOMAIN = ["127.0.2.2", "127.0.2.3", "127.0.2.4"];
    SH_EMAIL_ZRD_FRESH_DOMAIN = [
      "127.0.2.5", "127.0.2.6", "127.0.2.7", "127.0.2.8", "127.0.2.9", "127.0.2.10", "127.0.2.11", "127.0.2.12", "127.0.2.13", "127.0.2.14", "127.0.2.15", "127.0.2.16", "127.0.2.17", "127.0.2.18", "127.0.2.19", "127.0.2.20", "127.0.2.21", "127.0.2.22", "127.0.2.23", "127.0.2.24"
    ];
    SH_EMAIL_ZRD_DONT_QUERY_IPS = [ "127.0.2.255" ];
  }
}
"DBL" {
  # override the defaults for DBL defined in modules.d/rbl.conf
  rbl = "${SPAMHAUS_DQS_KEY}.dbl.dq.spamhaus.net";
  disable_monitoring = true;
}
"ZRD" {
  ignore_defaults = true;
  rbl = "${SPAMHAUS_DQS_KEY}.zrd.dq.spamhaus.net";
  no_ip = true;
  dkim = true;
  emails = true;
  emails_domainonly = true;
  urls = true;
  returncodes = {
    ZRD_VERY_FRESH_DOMAIN = ["127.0.2.2", "127.0.2.3", "127.0.2.4"];
    ZRD_FRESH_DOMAIN = ["127.0.2.5", "127.0.2.6", "127.0.2.7", "127.0.2.8", "127.0.2.9", "127.0.2.10", "127.0.2.11", "127.0.2.12", "127.0.2.13", "127.0.2.14", "127.0.2.15", "127.0.2.16", "127.0.2.17", "127.0.2.18", "127.0.2.19", "127.0.2.20", "127.0.2.21", "127.0.2.22", "127.0.2.23", "127.0.2.24"];
  }
}
spamhaus_sbl_url {
  ignore_defaults = true;
  rbl = "${SPAMHAUS_DQS_KEY}.sbl.dq.spamhaus.net";
  checks = ['urls'];
  disable_monitoring = true;
  returncodes {
    SPAMHAUS_SBL_URL = "127.0.0.2";
  }
}

if [ $BOOTSTRAP_EXIT_CODE -ne 0 ]; then
  echo "Bootstrap failed with exit code $BOOTSTRAP_EXIT_CODE. Not starting Rspamd."
  exit $BOOTSTRAP_EXIT_CODE
SH_HBL_EMAIL {
  ignore_defaults = true;
  rbl = "_email.${SPAMHAUS_DQS_KEY}.hbl.dq.spamhaus.net";
  emails_domainonly = false;
  selector = "from('smtp').lower;from('mime').lower";
  ignore_whitelist = true;
  checks = ['emails', 'replyto'];
  hash = "sha1";
  returncodes = {
    SH_HBL_EMAIL = [
      "127.0.3.2"
    ];
  }
}

spamhaus_dqs_hbl {
  symbol = "HBL_FILE_UNKNOWN";
  rbl = "_file.${SPAMHAUS_DQS_KEY}.hbl.dq.spamhaus.net.";
  selector = "attachments('rbase32', 'sha256')";
  ignore_whitelist = true;
  ignore_defaults = true;
  returncodes {
    SH_HBL_FILE_MALICIOUS = "127.0.3.10";
    SH_HBL_FILE_SUSPICIOUS = "127.0.3.15";
  }
}
EOF
else
  rm -rf /etc/rspamd/custom/dqs-rbl.conf
fi
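# Reading the blocks above: each returncodes table maps a DNS A answer from the
# Spamhaus DQS zone to an Rspamd symbol, e.g. an answer of 127.0.1.2 from
# <key>.dbl.dq.spamhaus.net yields RBL_DBL_SPAM; the symbol's score is assigned
# by Rspamd's scoring configuration elsewhere.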
|
||||
|
||||
echo "Bootstrap succeeded. Starting Rspamd..."
|
||||
exec "$@"
|
||||
|
||||
@@ -6,7 +6,7 @@ ARG DEBIAN_FRONTEND=noninteractive
|
||||
ARG DEBIAN_VERSION=bookworm
|
||||
ARG SOGO_DEBIAN_REPOSITORY=https://packagingv2.sogo.nu/sogo-nightly-debian/
|
||||
# renovate: datasource=github-releases depName=tianon/gosu versioning=semver-coerced extractVersion=^(?<version>.*)$
|
||||
ARG GOSU_VERSION=1.17
|
||||
ARG GOSU_VERSION=1.19
|
||||
ENV LC_ALL=C
|
||||
|
||||
# Prerequisites
|
||||
@@ -27,7 +27,6 @@ RUN echo "Building from repository $SOGO_DEBIAN_REPOSITORY" \
|
||||
psmisc \
|
||||
wget \
|
||||
patch \
|
||||
python3 python3-pip \
|
||||
&& dpkgArch="$(dpkg --print-architecture | awk -F- '{ print $NF }')" \
|
||||
&& wget -O /usr/local/bin/gosu "https://github.com/tianon/gosu/releases/download/$GOSU_VERSION/gosu-$dpkgArch" \
|
||||
&& chmod +x /usr/local/bin/gosu \
|
||||
@@ -43,21 +42,18 @@ RUN echo "Building from repository $SOGO_DEBIAN_REPOSITORY" \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& touch /etc/default/locale
|
||||
|
||||
RUN pip install --break-system-packages \
|
||||
mysql-connector-python \
|
||||
jinja2 \
|
||||
redis \
|
||||
dnspython \
|
||||
psutil
|
||||
COPY ./bootstrap-sogo.sh /bootstrap-sogo.sh
|
||||
COPY syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
|
||||
COPY syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
|
||||
COPY supervisord.conf /etc/supervisor/supervisord.conf
|
||||
COPY acl.diff /acl.diff
|
||||
COPY navMailcowBtns.diff /navMailcowBtns.diff
|
||||
COPY stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
|
||||
COPY docker-entrypoint.sh /
|
||||
|
||||
RUN chmod +x /bootstrap-sogo.sh \
|
||||
/usr/local/sbin/stop-supervisor.sh
|
||||
|
||||
COPY data/Dockerfiles/bootstrap /bootstrap
|
||||
COPY data/Dockerfiles/sogo/syslog-ng.conf /etc/syslog-ng/syslog-ng.conf
|
||||
COPY data/Dockerfiles/sogo/syslog-ng-redis_slave.conf /etc/syslog-ng/syslog-ng-redis_slave.conf
|
||||
COPY data/Dockerfiles/sogo/supervisord.conf /etc/supervisor/supervisord.conf
|
||||
COPY data/Dockerfiles/sogo/stop-supervisor.sh /usr/local/sbin/stop-supervisor.sh
|
||||
COPY data/Dockerfiles/sogo/docker-entrypoint.sh /
|
||||
ENTRYPOINT ["/docker-entrypoint.sh"]
|
||||
|
||||
RUN chmod +x /usr/local/sbin/stop-supervisor.sh
|
||||
|
||||
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
|
||||
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/supervisord.conf"]
|
||||
11
data/Dockerfiles/sogo/acl.diff
Normal file
11
data/Dockerfiles/sogo/acl.diff
Normal file
@@ -0,0 +1,11 @@
|
||||
--- /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox 2018-08-17 18:29:57.987504204 +0200
|
||||
+++ /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox 2018-08-17 18:29:35.918291298 +0200
|
||||
@@ -46,7 +46,7 @@
|
||||
</md-item-template>
|
||||
</md-autocomplete>
|
||||
</div>
|
||||
- <md-card ng-repeat="user in acl.users | orderBy:['userClass', 'cn']"
|
||||
+ <md-card ng-repeat="user in acl.users | filter:{ userClass: 'normal' } | orderBy:['cn']"
|
||||
class="sg-collapsed"
|
||||
ng-class="{ 'sg-expanded': user.uid == acl.selectedUid }">
|
||||
<a class="md-flex md-button" ng-click="acl.selectUser(user, $event)">
|
||||
157
data/Dockerfiles/sogo/bootstrap-sogo.sh
Executable file
157
data/Dockerfiles/sogo/bootstrap-sogo.sh
Executable file
@@ -0,0 +1,157 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Wait for MySQL to warm-up
|
||||
while ! mariadb-admin status --ssl=false --socket=/var/run/mysqld/mysqld.sock -u${DBUSER} -p${DBPASS} --silent; do
|
||||
echo "Waiting for database to come up..."
|
||||
sleep 2
|
||||
done
|
||||
|
||||
# Wait until port becomes free and send sig
|
||||
until ! nc -z sogo-mailcow 20000;
|
||||
do
|
||||
killall -TERM sogod
|
||||
sleep 3
|
||||
done
|
||||
|
||||
# Wait for updated schema
|
||||
DBV_NOW=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'db_schema';" -BN)
|
||||
DBV_NEW=$(grep -oE '\$db_version = .*;' init_db.inc.php | sed 's/$db_version = //g;s/;//g' | cut -d \" -f2)
|
||||
while [[ "${DBV_NOW}" != "${DBV_NEW}" ]]; do
|
||||
echo "Waiting for schema update..."
|
||||
DBV_NOW=$(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT version FROM versions WHERE application = 'db_schema';" -BN)
|
||||
DBV_NEW=$(grep -oE '\$db_version = .*;' init_db.inc.php | sed 's/$db_version = //g;s/;//g' | cut -d \" -f2)
|
||||
sleep 5
|
||||
done
|
||||
echo "DB schema is ${DBV_NOW}"
|
||||
|
||||
if [[ "${MASTER}" =~ ^([yY][eE][sS]|[yY])+$ ]]; then
|
||||
mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "DROP TRIGGER IF EXISTS sogo_update_password"
|
||||
fi
|
||||
|
||||
# cat /dev/urandom seems to hang here occasionally and is not recommended anyway, better use openssl
|
||||
RAND_PASS=$(openssl rand -base64 16 | tr -dc _A-Z-a-z-0-9)
# Generate plist header with timezone data
mkdir -p /var/lib/sogo/GNUstep/Defaults/
cat <<EOF > /var/lib/sogo/GNUstep/Defaults/sogod.plist
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//GNUstep//DTD plist 0.9//EN" "http://www.gnustep.org/plist-0_9.xml">
<plist version="0.9">
<dict>
    <key>OCSAclURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_acl</string>
    <key>SOGoIMAPServer</key>
    <string>imap://${IPV4_NETWORK}.250:143/?TLS=YES&amp;tlsVerifyMode=none</string>
    <key>SOGoSieveServer</key>
    <string>sieve://${IPV4_NETWORK}.250:4190/?TLS=YES&amp;tlsVerifyMode=none</string>
    <key>SOGoSMTPServer</key>
    <string>smtp://${IPV4_NETWORK}.253:588/?TLS=YES&amp;tlsVerifyMode=none</string>
    <key>SOGoTrustProxyAuthentication</key>
    <string>YES</string>
    <key>SOGoEncryptionKey</key>
    <string>${RAND_PASS}</string>
    <key>OCSAdminURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_admin</string>
    <key>OCSCacheFolderURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_cache_folder</string>
    <key>OCSEMailAlarmsFolderURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_alarms_folder</string>
    <key>OCSFolderInfoURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_folder_info</string>
    <key>OCSSessionsFolderURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_sessions_folder</string>
    <key>OCSStoreURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_store</string>
    <key>SOGoProfileURL</key>
    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/sogo_user_profile</string>
    <key>SOGoTimeZone</key>
    <string>${TZ}</string>
    <key>domains</key>
    <dict>
EOF
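
# One SQL user source is generated per mailcow domain; the gal column of the
# domain table decides whether that source also acts as a global address list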
# Generate multi-domain setup
while read -r line gal
do
  echo "        <key>${line}</key>
        <dict>
            <key>SOGoMailDomain</key>
            <string>${line}</string>
            <key>SOGoUserSources</key>
            <array>
                <dict>
                    <key>MailFieldNames</key>
                    <array>
                        <string>aliases</string>
                        <string>ad_aliases</string>
                        <string>ext_acl</string>
                    </array>
                    <key>KindFieldName</key>
                    <string>kind</string>
                    <key>DomainFieldName</key>
                    <string>domain</string>
                    <key>MultipleBookingsFieldName</key>
                    <string>multiple_bookings</string>
                    <key>listRequiresDot</key>
                    <string>NO</string>
                    <key>canAuthenticate</key>
                    <string>YES</string>
                    <key>displayName</key>
                    <string>GAL ${line}</string>
                    <key>id</key>
                    <string>${line}</string>
                    <key>isAddressBook</key>
                    <string>${gal}</string>
                    <key>type</key>
                    <string>sql</string>
                    <key>userPasswordAlgorithm</key>
                    <string>${MAILCOW_PASS_SCHEME}</string>
                    <key>prependPasswordScheme</key>
                    <string>YES</string>
                    <key>viewURL</key>
                    <string>mysql://${DBUSER}:${DBPASS}@%2Fvar%2Frun%2Fmysqld%2Fmysqld.sock/${DBNAME}/_sogo_static_view</string>
                </dict>" >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
  # Generate an alternative LDAP authentication dict for when SQL authentication fails
  # Attributes are nevertheless read from LDAP
  /etc/sogo/plist_ldap.sh ${line} ${gal} >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
  echo "            </array>
        </dict>" >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
done < <(mariadb --skip-ssl --socket=/var/run/mysqld/mysqld.sock -u ${DBUSER} -p${DBPASS} ${DBNAME} -e "SELECT domain, CASE gal WHEN '1' THEN 'YES' ELSE 'NO' END AS gal FROM domain;" -B -N)

# Generate footer
echo '    </dict>
</dict>
</plist>' >> /var/lib/sogo/GNUstep/Defaults/sogod.plist
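
# sogod.plist contains the DB credentials and the encryption key, hence the
# restrictive mode below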
# Fix permissions
chown sogo:sogo -R /var/lib/sogo/
chmod 600 /var/lib/sogo/GNUstep/Defaults/sogod.plist

# Patch ACLs (currently left commented out)
#if [[ ${ACL_ANYONE} == 'allow' ]]; then
#  # enable "anyone" and "authenticated" targets for ACLs
#  if patch -R -sfN --dry-run /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff > /dev/null; then
#    patch -R /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff;
#  fi
#else
#  # disable "anyone" and "authenticated" targets for ACLs
#  if patch -sfN --dry-run /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff > /dev/null; then
#    patch /usr/lib/GNUstep/SOGo/Templates/UIxAclEditor.wox < /acl.diff;
#  fi
#fi
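
# A reverse dry-run succeeds only while the patch is applied, so this reverts
# navMailcowBtns.diff exactly once if a previous run left it in place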
if patch -R -sfN --dry-run /usr/lib/GNUstep/SOGo/Templates/UIxTopnavToolbar.wox < /navMailcowBtns.diff > /dev/null; then
  patch -R /usr/lib/GNUstep/SOGo/Templates/UIxTopnavToolbar.wox < /navMailcowBtns.diff;
fi

# Rename custom logo, if any
[[ -f /etc/sogo/sogo-full.svg ]] && mv /etc/sogo/sogo-full.svg /etc/sogo/custom-fulllogo.svg
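
# The named volume receiving this content is shared with the web server
# container, which serves SOGo's static files from it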
# Rsync web content
echo "Syncing web content with named volume"
rsync -a /usr/lib/GNUstep/SOGo/. /sogo_web/

# Chown backup path
chown -R sogo:sogo /sogo_backup

exec gosu sogo /usr/sbin/sogod
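
A quick way to sanity-check the result of this script (a sketch, not part of the diff; it assumes xmllint is installed in the sogo container, which may not be the case):

    # the generated defaults file should be well-formed XML
    docker compose exec sogo-mailcow xmllint --noout /var/lib/sogo/GNUstep/Defaults/sogod.plist
    # verify sogod is running under the sogo user
    docker compose exec sogo-mailcow pgrep -a sogod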
Some files were not shown because too many files have changed in this diff.