Mirror of https://github.com/pocket-id/pocket-id.git, synced 2026-02-20 03:39:52 +00:00

Compare commits (123 commits)
| SHA1 |
|---|
| 646f849441 |
| 20bbd4a06f |
| 2d7e2ec8df |
| 72009ced67 |
| 4881130ead |
| d6a7b503ff |
| 3c3916536e |
| a24b2afb7b |
| 7c34501055 |
| ba00f40bd4 |
| 2f651adf3b |
| f42ba3bbef |
| 2341da99e9 |
| 2cce200892 |
| cd2e9f3a2a |
| f5e2c68ba3 |
| 651b58aee6 |
| ffb2ef91bd |
| 4776b70d96 |
| 579cfdc678 |
| e4a8ca476c |
| 386add08c4 |
| 894eaf3cff |
| d9e7bf9eef |
| b19d901618 |
| 0b625a9707 |
| e60b80632f |
| 078152d4db |
| ba2f0f18f4 |
| 3420a00073 |
| f0144584af |
| e1c5021eee |
| c0e490c28f |
| 3c98c98fe3 |
| 1bc9f5f7e7 |
| 461293ba1d |
| 7c5ffbf9a5 |
| f75cef83d5 |
| e358c433f0 |
| 08e4ffeb60 |
| 59ca6b26ac |
| f5da11b99b |
| 3eaf36aae7 |
| 0a6ff6f84b |
| edb32d82b2 |
| 90f555f7c1 |
| 177ada10ba |
| 91b0d74c43 |
| 3a1dd3168e |
| 25f67bd25a |
| e3483a9c78 |
| 95d49256f6 |
| 8cddcb88e8 |
| a25d6ef56c |
| 14c7471b52 |
| 5d6a7fdb58 |
| a1cd3251cd |
| 4eeb06f29d |
| b2c718d13d |
| 8d30346f64 |
| 714b7744f0 |
| d98c0a391a |
| 4fe56a8d5c |
| cfc9e464d9 |
| 3d46badb3c |
| f523f39483 |
| 4bde271b47 |
| a3c968758a |
| ca888b3dd2 |
| ce88686c5f |
| a9b6635126 |
| e817f042ec |
| c56afe016e |
| a54b867105 |
| 29a1d3b778 |
| 12125713a2 |
| ab9c0f9ac0 |
| 42b872d6b2 |
| bfd71d090c |
| d5e0cfd4a6 |
| 9981304b4b |
| 5cf73e9309 |
| f125cf0dad |
| 6a038fcf9a |
| 76e0192cee |
| 3ebf94dd84 |
| 7ec57437ac |
| ed2c7b2303 |
| e03270eb9d |
| d683d18d91 |
| f184120890 |
| 04d8500910 |
| 93639dddb2 |
| a190529117 |
| 73392b5837 |
| 65616f65e5 |
| 98a99fbb0a |
| 3f3b6b88fd |
| 8f98d8c0b4 |
| c9308472a9 |
| 6362ff9861 |
| 10d640385f |
| 47927d1574 |
| b356cef766 |
| 9fc45930a8 |
| 028d1c858e |
| eb3963d0fc |
| 35d913f905 |
| 32485f4c7c |
| ceb38b0825 |
| c0b6ede5be |
| c20e93b55c |
| 24ca6a106d |
| 9f0aa55be6 |
| 068fcc65a6 |
| f2dfb3da5d |
| cbf0e3117d |
| 694f266dea |
| 29fc185376 |
| 781be37416 |
| b1f97e05a1 |
| 2c74865173 |
| ad8a90c839 |

.env.example (14 changed lines)

@@ -1,6 +1,18 @@
# See the documentation for more information: https://pocket-id.org/docs/configuration/environment-variables

# These variables must be configured for your deployment:
APP_URL=https://your-pocket-id-domain.com

# Encryption key (choose one method):
# Method 1: Direct key (simple but less secure)
# Generate with: openssl rand -base64 32
ENCRYPTION_KEY=
# Method 2: File-based key (recommended)
# Put the base64 key in a file and point to it here.
# ENCRYPTION_KEY_FILE=/path/to/encryption_key

# These variables are optional but recommended to review:
TRUST_PROXY=false
MAXMIND_LICENSE_KEY=
PUID=1000
PGID=1000

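As an illustration of the key setup described in the comments above, here is a minimal Go sketch (not part of the repository) that produces the same kind of value as `openssl rand -base64 32` and stores it in a file that `ENCRYPTION_KEY_FILE` could point to; the output path is an assumption taken from the placeholder in the example file.

```go
// Illustrative sketch only: generate a 32-byte key, base64-encode it (the same
// shape as `openssl rand -base64 32`), and write it to a file for ENCRYPTION_KEY_FILE.
package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"os"
)

func main() {
	// 32 random bytes, matching the length requested from openssl in the example file.
	raw := make([]byte, 32)
	if _, err := rand.Read(raw); err != nil {
		fmt.Fprintln(os.Stderr, "generating key:", err)
		os.Exit(1)
	}
	key := base64.StdEncoding.EncodeToString(raw)

	// "/path/to/encryption_key" mirrors the placeholder above; 0600 keeps the file owner-only.
	if err := os.WriteFile("/path/to/encryption_key", []byte(key), 0o600); err != nil {
		fmt.Fprintln(os.Stderr, "writing key file:", err)
		os.Exit(1)
	}
	fmt.Println("encryption key written")
}
```

Method 1 instead places the same base64 string directly in `ENCRYPTION_KEY`.
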
.github/workflows/backend-linter.yml (6 changed lines, vendored)

@@ -6,7 +6,7 @@ on:
paths:
- "backend/**"
pull_request:
branches: [main]
branches: [main, breaking/**]
paths:
- "backend/**"

@@ -24,10 +24,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5

- name: Set up Go
uses: actions/setup-go@v5
uses: actions/setup-go@v6
with:
go-version-file: backend/go.mod

.github/workflows/build-next.yml (18 changed lines, vendored)

@@ -19,22 +19,20 @@ jobs:
attestations: write
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5

- name: Setup pnpm
uses: pnpm/action-setup@v4

- name: Setup Node.js
uses: actions/setup-node@v4
uses: actions/setup-node@v5
with:
node-version: 22
cache: 'pnpm'
cache-dependency-path: pnpm-lock.yaml

- name: Setup Go
uses: actions/setup-go@v5
uses: actions/setup-go@v6
with:
go-version-file: 'backend/go.mod'
go-version-file: "backend/go.mod"

- name: Set up QEMU
uses: docker/setup-qemu-action@v3

@@ -74,7 +72,7 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ env.DOCKER_IMAGE_NAME }}:next
file: Dockerfile-prebuilt
file: docker/Dockerfile-prebuilt
- name: Build and push container image (distroless)
uses: docker/build-push-action@v6
id: container-build-push-distroless

@@ -83,16 +81,16 @@ jobs:
platforms: linux/amd64,linux/arm64
push: true
tags: ${{ env.DOCKER_IMAGE_NAME }}:next-distroless
file: Dockerfile-distroless
file: docker/Dockerfile-distroless
- name: Container image attestation
uses: actions/attest-build-provenance@v2
with:
subject-name: '${{ env.DOCKER_IMAGE_NAME }}'
subject-name: "${{ env.DOCKER_IMAGE_NAME }}"
subject-digest: ${{ steps.build-push-image.outputs.digest }}
push-to-registry: true
- name: Container image attestation (distroless)
uses: actions/attest-build-provenance@v2
with:
subject-name: '${{ env.DOCKER_IMAGE_NAME }}'
subject-name: "${{ env.DOCKER_IMAGE_NAME }}"
subject-digest: ${{ steps.container-build-push-distroless.outputs.digest }}
push-to-registry: true

.github/workflows/e2e-tests.yml (151 changed lines, vendored)

@@ -3,15 +3,15 @@ on:
push:
branches: [main]
paths-ignore:
- 'docs/**'
- '**.md'
- '.github/**'
- "docs/**"
- "**.md"
- ".github/**"
pull_request:
branches: [main]
branches: [main, breaking/**]
paths-ignore:
- 'docs/**'
- '**.md'
- '.github/**'
- "docs/**"
- "**.md"
- ".github/**"

jobs:
build:

@@ -22,7 +22,7 @@ jobs:
actions: write
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

@@ -30,6 +30,8 @@ jobs:
- name: Build and export
uses: docker/build-push-action@v6
with:
context: .
file: docker/Dockerfile
push: false
load: false
tags: pocket-id:test

@@ -55,58 +57,113 @@ jobs:
strategy:
fail-fast: false
matrix:
db: [sqlite, postgres]
include:
- db: sqlite
storage: filesystem
- db: postgres
storage: filesystem
- db: sqlite
storage: s3
- db: sqlite
storage: database
- db: postgres
storage: database

steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v5

- name: Setup pnpm
uses: pnpm/action-setup@v4

- uses: actions/setup-node@v4
- name: Setup Node.js
uses: actions/setup-node@v5
with:
node-version: 22
cache: 'pnpm'
cache-dependency-path: pnpm-lock.yaml

- name: Cache Playwright Browsers
uses: actions/cache@v3
uses: actions/cache@v4
id: playwright-cache
with:
path: ~/.cache/ms-playwright
key: ${{ runner.os }}-playwright-${{ hashFiles('pnpm-lock.yaml') }}

- name: Cache PostgreSQL Docker image
if: matrix.db == 'postgres'
uses: actions/cache@v3
uses: actions/cache@v4
id: postgres-cache
with:
path: /tmp/postgres-image.tar
key: postgres-17-${{ runner.os }}

- name: Pull and save PostgreSQL image
if: matrix.db == 'postgres' && steps.postgres-cache.outputs.cache-hit != 'true'
run: |
docker pull postgres:17
docker save postgres:17 > /tmp/postgres-image.tar

- name: Load PostgreSQL image from cache
- name: Load PostgreSQL image
if: matrix.db == 'postgres' && steps.postgres-cache.outputs.cache-hit == 'true'
run: docker load < /tmp/postgres-image.tar

- name: Cache LLDAP Docker image
uses: actions/cache@v3
uses: actions/cache@v4
id: lldap-cache
with:
path: /tmp/lldap-image.tar
key: lldap-stable-${{ runner.os }}

- name: Pull and save LLDAP image
if: steps.lldap-cache.outputs.cache-hit != 'true'
run: |
docker pull nitnelave/lldap:stable
docker save nitnelave/lldap:stable > /tmp/lldap-image.tar

- name: Load LLDAP image from cache
docker pull lldap/lldap:2025-05-19
docker save lldap/lldap:2025-05-19 > /tmp/lldap-image.tar
- name: Load LLDAP image
if: steps.lldap-cache.outputs.cache-hit == 'true'
run: docker load < /tmp/lldap-image.tar

- name: Cache SCIM Test Server Docker image
uses: actions/cache@v4
id: scim-cache
with:
path: /tmp/scim-test-server-image.tar
key: scim-test-server-${{ runner.os }}
- name: Pull and save SCIM Test Server image
if: steps.scim-cache.outputs.cache-hit != 'true'
run: |
docker pull ghcr.io/pocket-id/scim-test-server
docker save ghcr.io/pocket-id/scim-test-server > /tmp/scim-test-server-image.tar
- name: Load SCIM Test Server image
if: steps.scim-cache.outputs.cache-hit == 'true'
run: docker load < /tmp/scim-test-server-image.tar

- name: Cache Localstack S3 Docker image
if: matrix.storage == 's3'
uses: actions/cache@v4
id: s3-cache
with:
path: /tmp/localstack-s3-image.tar
key: localstack-s3-latest-${{ runner.os }}
- name: Pull and save Localstack S3 image
if: matrix.storage == 's3' && steps.s3-cache.outputs.cache-hit != 'true'
run: |
docker pull localstack/localstack:s3-latest
docker save localstack/localstack:s3-latest > /tmp/localstack-s3-image.tar
- name: Load Localstack S3 image
if: matrix.storage == 's3' && steps.s3-cache.outputs.cache-hit == 'true'
run: docker load < /tmp/localstack-s3-image.tar

- name: Cache AWS CLI Docker image
if: matrix.storage == 's3'
uses: actions/cache@v4
id: aws-cli-cache
with:
path: /tmp/aws-cli-image.tar
key: aws-cli-latest-${{ runner.os }}
- name: Pull and save AWS CLI image
if: matrix.storage == 's3' && steps.aws-cli-cache.outputs.cache-hit != 'true'
run: |
docker pull amazon/aws-cli:latest
docker save amazon/aws-cli:latest > /tmp/aws-cli-image.tar
- name: Load AWS CLI image
if: matrix.storage == 's3' && steps.aws-cli-cache.outputs.cache-hit == 'true'
run: docker load < /tmp/aws-cli-image.tar

- name: Download Docker image artifact
uses: actions/download-artifact@v4
with:

@@ -123,19 +180,39 @@ jobs:
working-directory: ./tests
if: steps.playwright-cache.outputs.cache-hit != 'true'
run: pnpm exec playwright install --with-deps chromium
- name: Run Docker Container (sqlite) with LDAP
if: matrix.db == 'sqlite'
working-directory: ./tests/setup
run: |
docker compose up -d
docker compose logs -f pocket-id &> /tmp/backend.log &

- name: Run Docker Container (postgres) with LDAP
if: matrix.db == 'postgres'
- name: Run Docker containers
working-directory: ./tests/setup
run: |
docker compose -f docker-compose-postgres.yml up -d
docker compose -f docker-compose-postgres.yml logs -f pocket-id &> /tmp/backend.log &
DOCKER_COMPOSE_FILE=docker-compose.yml

cat > .env <<EOF
FILE_BACKEND=${{ matrix.storage }}
SCIM_SERVICE_PROVIDER_URL=http://localhost:18123/v2
SCIM_SERVICE_PROVIDER_URL_INTERNAL=http://scim-test-server:8080/v2
EOF

if [ "${{ matrix.db }}" = "postgres" ]; then
DOCKER_COMPOSE_FILE=docker-compose-postgres.yml
elif [ "${{ matrix.storage }}" = "s3" ]; then
DOCKER_COMPOSE_FILE=docker-compose-s3.yml
fi

docker compose -f "$DOCKER_COMPOSE_FILE" up -d

{
LOG_FILE="/tmp/backend.log"
while true; do
CID=$(docker compose -f "$DOCKER_COMPOSE_FILE" ps -q pocket-id)
if [ -n "$CID" ]; then
echo "[$(date)] Attaching logs for $CID" >> "$LOG_FILE"
docker logs -f --since=0 "$CID" >> "$LOG_FILE" 2>&1
else
echo "[$(date)] Container not yet running…" >> "$LOG_FILE"
fi
sleep 1
done
} &

- name: Run Playwright tests
working-directory: ./tests

@@ -145,7 +222,7 @@ jobs:
uses: actions/upload-artifact@v4
if: always() && github.event.pull_request.head.ref != 'i18n_crowdin'
with:
name: playwright-report-${{ matrix.db }}
name: playwright-report-${{ matrix.db }}-${{ matrix.storage }}
path: tests/.report
include-hidden-files: true
retention-days: 15

@@ -154,7 +231,7 @@ jobs:
uses: actions/upload-artifact@v4
if: always() && github.event.pull_request.head.ref != 'i18n_crowdin'
with:
name: backend-${{ matrix.db }}
name: backend-${{ matrix.db }}-${{ matrix.storage }}
path: /tmp/backend.log
include-hidden-files: true
retention-days: 15

.github/workflows/release.yml (22 changed lines, vendored)

@@ -3,7 +3,7 @@ name: Release
on:
push:
tags:
- 'v*.*.*'
- "v*.*.*"

jobs:
build:

@@ -19,14 +19,12 @@ jobs:
- name: Setup pnpm
uses: pnpm/action-setup@v4
- name: Setup Node.js
uses: actions/setup-node@v4
uses: actions/setup-node@v5
with:
node-version: 22
cache: 'pnpm'
cache-dependency-path: pnpm-lock.yaml
- uses: actions/setup-go@v5
- uses: actions/setup-go@v6
with:
go-version-file: 'backend/go.mod'
go-version-file: "backend/go.mod"
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx

@@ -81,7 +79,7 @@ jobs:
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
file: Dockerfile-prebuilt
file: docker/Dockerfile-prebuilt
- name: Build and push container image (distroless)
uses: docker/build-push-action@v6
id: container-build-push-distroless

@@ -91,21 +89,21 @@ jobs:
push: true
tags: ${{ steps.meta-distroless.outputs.tags }}
labels: ${{ steps.meta-distroless.outputs.labels }}
file: Dockerfile-distroless
file: docker/Dockerfile-distroless
- name: Binary attestation
uses: actions/attest-build-provenance@v2
with:
subject-path: 'backend/.bin/pocket-id-**'
subject-path: "backend/.bin/pocket-id-**"
- name: Container image attestation
uses: actions/attest-build-provenance@v2
with:
subject-name: '${{ env.DOCKER_IMAGE_NAME }}'
subject-name: "${{ env.DOCKER_IMAGE_NAME }}"
subject-digest: ${{ steps.container-build-push.outputs.digest }}
push-to-registry: true
- name: Container image attestation (distroless)
uses: actions/attest-build-provenance@v2
with:
subject-name: '${{ env.DOCKER_IMAGE_NAME }}'
subject-name: "${{ env.DOCKER_IMAGE_NAME }}"
subject-digest: ${{ steps.container-build-push-distroless.outputs.digest }}
push-to-registry: true
- name: Upload binaries to release

@@ -122,6 +120,6 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Mark release as published
run: gh release edit ${{ github.ref_name }} --draft=false

.github/workflows/svelte-check.yml (30 changed lines, vendored)

@@ -4,21 +4,21 @@ on:
push:
branches: [main]
paths:
- 'frontend/src/**'
- '.github/svelte-check-matcher.json'
- 'frontend/package.json'
- 'frontend/package-lock.json'
- 'frontend/tsconfig.json'
- 'frontend/svelte.config.js'
- "frontend/src/**"
- ".github/svelte-check-matcher.json"
- "frontend/package.json"
- "frontend/package-lock.json"
- "frontend/tsconfig.json"
- "frontend/svelte.config.js"
pull_request:
branches: [main]
paths:
- 'frontend/src/**'
- '.github/svelte-check-matcher.json'
- 'frontend/package.json'
- 'frontend/package-lock.json'
- 'frontend/tsconfig.json'
- 'frontend/svelte.config.js'
- "frontend/src/**"
- ".github/svelte-check-matcher.json"
- "frontend/package.json"
- "frontend/package-lock.json"
- "frontend/tsconfig.json"
- "frontend/svelte.config.js"
workflow_dispatch:

jobs:

@@ -34,17 +34,15 @@ jobs:

steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5

- name: Setup pnpm
uses: pnpm/action-setup@v4

- name: Setup Node.js
uses: actions/setup-node@v4
uses: actions/setup-node@v5
with:
node-version: 22
cache: 'pnpm'
cache-dependency-path: pnpm-lock.yaml

- name: Install dependencies
run: pnpm --filter pocket-id-frontend install --frozen-lockfile

.github/workflows/unit-tests.yml (4 changed lines, vendored)

@@ -16,8 +16,8 @@ jobs:
actions: write
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
- uses: actions/checkout@v5
- uses: actions/setup-go@v6
with:
go-version-file: "backend/go.mod"
cache-dependency-path: "backend/go.sum"

.github/workflows/update-aaguids.yml (2 changed lines, vendored)

@@ -15,7 +15,7 @@ jobs:

steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5

- name: Fetch JSON data
run: |

.gitignore (5 changed lines, vendored)

@@ -1,8 +1,12 @@
# JetBrains
**/.idea

# Node
node_modules

# PNPM
.pnpm-store/

# Output
.output
.vercel

@@ -11,6 +15,7 @@ node_modules
/backend/bin
pocket-id
/tests/test-results/*.json
.tmp/

# OS
.DS_Store

.vscode/launch.json (4 changed lines, vendored)

@@ -5,12 +5,14 @@
"name": "Backend",
"type": "go",
"request": "launch",
"envFile": "${workspaceFolder}/backend/cmd/.env",
"envFile": "${workspaceFolder}/backend/.env",
"env": {
"APP_ENV": "development"
},
"mode": "debug",
"program": "${workspaceFolder}/backend/cmd/main.go",
"buildFlags": "-tags=exclude_frontend",
"cwd": "${workspaceFolder}/backend",
},
{
"name": "Frontend",

CHANGELOG.md (221 changed lines)

@@ -1,3 +1,224 @@
## v2.1.0

### Bug Fixes

- invalid cookie name for email login code device token ([d6a7b50](https://github.com/pocket-id/pocket-id/commit/d6a7b503ff4571b1291a55a569add3374f5e2d5b) by @stonith404)

### Features

- add issuer url to oidc client details list ([#1197](https://github.com/pocket-id/pocket-id/pull/1197) by @kmendell)
- process nonce within device authorization flow ([#1185](https://github.com/pocket-id/pocket-id/pull/1185) by @justincmoy)

### Other

- run SCIM jobs in context of gocron instead of custom implementation ([4881130](https://github.com/pocket-id/pocket-id/commit/4881130eadcef0642f8a87650b7c36fda453b51b) by @stonith404)

**Full Changelog**: https://github.com/pocket-id/pocket-id/compare/v2.0.2...v2.1.0

## v2.0.2

### Bug Fixes

- migration fails if users exist with no email address ([2f651ad](https://github.com/pocket-id/pocket-id/commit/2f651adf3b4e8d689461da2083c3afcb1eb1d477) by @stonith404)
- allow version downgrade database is dirty ([ba00f40](https://github.com/pocket-id/pocket-id/commit/ba00f40bd4b06f31d251599fcb1db63e902a6987) by @stonith404)
- localhost callback URLs with port don't match correctly ([7c34501](https://github.com/pocket-id/pocket-id/commit/7c345010556f11a593948b2a1ae558b7a8003696) by @stonith404)

### Other

- add no-op migration to postgres ([a24b2af](https://github.com/pocket-id/pocket-id/commit/a24b2afb7b8165bed05976058a8ae797adc245df) by @stonith404)

**Full Changelog**: https://github.com/pocket-id/pocket-id/compare/v2.0.1...v2.0.2

## v2.0.1

### Bug Fixes

- admins imported from LDAP lose admin privileges ([2cce200](https://github.com/pocket-id/pocket-id/commit/2cce2008928081b5e0f0e6bcbc3f43816f082de9) by @stonith404)
- restore old input input field size ([2341da9](https://github.com/pocket-id/pocket-id/commit/2341da99e9716686cf28dd0680d751ae9da0fadc) by @stonith404)

### Other

- bump image tag to `v2` ([cd2e9f3](https://github.com/pocket-id/pocket-id/commit/cd2e9f3a2ad753815ef8da998f9b54853d953a2a) by @stonith404)

**Full Changelog**: https://github.com/pocket-id/pocket-id/compare/v2.0.0...v2.0.1

## v2.0.0

### Bug Fixes

- update image format message to include WEBP ([#1133](https://github.com/pocket-id/pocket-id/pull/1133) by @sebdanielsson)
- add Japanese locale to inlang settings ([#1142](https://github.com/pocket-id/pocket-id/pull/1142) by @tai-ga)
- restrict email one time sign in token to same browser ([#1144](https://github.com/pocket-id/pocket-id/pull/1144) by @stonith404)
- rename `LDAP_ATTRIBUTE_ADMIN_GROUP` env variable to `LDAP_ADMIN_GROUP_NAME` ([e1c5021](https://github.com/pocket-id/pocket-id/commit/e1c5021eeedcbc54bad0eccd72d7ae760be61934) by @stonith404)
- make wildcard matching in callback URLs more stricter ([078152d](https://github.com/pocket-id/pocket-id/commit/078152d4dbb05dd027ff323f39d090ecb67927c7) by @stonith404)
- remove ambiguous characters from login code ([d9e7bf9](https://github.com/pocket-id/pocket-id/commit/d9e7bf9eef522d8c081fac2000bace6f95518039) by @stonith404)
- add missing translations to date picker ([894eaf3](https://github.com/pocket-id/pocket-id/commit/894eaf3cffdd9182b9c29e28b4dcb7e8bcbda26b) by @stonith404)

### Features

- add HTTP `HEAD` method support ([#1135](https://github.com/pocket-id/pocket-id/pull/1135) by @stonith404)
- add email logo customization ([#1150](https://github.com/pocket-id/pocket-id/pull/1150) by @MelvinSnijders)
- add ability define user groups for sign up tokens ([#1155](https://github.com/pocket-id/pocket-id/pull/1155) by @stonith404)
- minor redesign of auth pages ([08e4ffe](https://github.com/pocket-id/pocket-id/commit/08e4ffeb600a4a6644d91b1600b0205997ed1685) by @stonith404)
- allow audit log retention to be controlled by env variable ([#1158](https://github.com/pocket-id/pocket-id/pull/1158) by @jenic)
- restrict oidc clients by user groups per default ([#1164](https://github.com/pocket-id/pocket-id/pull/1164) by @stonith404)
- add "restricted" column to oidc client table ([1bc9f5f](https://github.com/pocket-id/pocket-id/commit/1bc9f5f7e780310d81608381544ba530df7f433b) by @stonith404)
- drop support for storing JWK on the filesystem ([f014458](https://github.com/pocket-id/pocket-id/commit/f0144584af90b918a3157a298f1bb95928a117b8) by @stonith404)
- add CLI command for importing and exporting Pocket ID data ([3420a00](https://github.com/pocket-id/pocket-id/commit/3420a000737d89a5c3c6c250d171d96126553beb) by @stonith404)
- remove DbProvider env variable and calculate it dynamically ([ba2f0f1](https://github.com/pocket-id/pocket-id/commit/ba2f0f18f4bacc5a86217dec0b0dcb6030c40cb9) by @kmendell)
- add support for SCIM provisioning ([#1182](https://github.com/pocket-id/pocket-id/pull/1182) by @stonith404)

### Other

- update AAGUIDs ([#1128](https://github.com/pocket-id/pocket-id/pull/1128) by @github-actions[bot])
- fix api key e2e test ([25f67bd](https://github.com/pocket-id/pocket-id/commit/25f67bd25a0ee0cab48d72107722e8c8428fa547) by @stonith404)
- update AAGUIDs ([#1140](https://github.com/pocket-id/pocket-id/pull/1140) by @github-actions[bot])
- upgrade dependencies ([90f555f](https://github.com/pocket-id/pocket-id/commit/90f555f7c12ff07545f7cd1a1754a8c19f5a4978) by @stonith404)
- fix type error after version bump ([edb32d8](https://github.com/pocket-id/pocket-id/commit/edb32d82b2c138433d8eb17d5a6a19f4728ae2d4) by @stonith404)
- remove `breaking/**` push trigger from actions ([461293b](https://github.com/pocket-id/pocket-id/commit/461293ba1da4ddbff2c77f23a42487b63964e474) by @stonith404)
- update AAGUIDs ([#1177](https://github.com/pocket-id/pocket-id/pull/1177) by @github-actions[bot])
- preparation for merge into main branch ([#1113](https://github.com/pocket-id/pocket-id/pull/1113) by @stonith404)
- bump pnpm to version 10.27.0 ([#1183](https://github.com/pocket-id/pocket-id/pull/1183) by @kmendell)
- update forms and other areas to use new shadcn components ([#1115](https://github.com/pocket-id/pocket-id/pull/1115) by @kmendell)
- run formatter ([e4a8ca4](https://github.com/pocket-id/pocket-id/commit/e4a8ca476cc3c7e8d8cdc8de21b5d7d99d07f7a0) by @stonith404)
- upgrade dependencies ([4776b70](https://github.com/pocket-id/pocket-id/commit/4776b70d96f3dc291394dc79c941738bbe48199a) by @stonith404)
- change translation string in e2e tests ([ffb2ef9](https://github.com/pocket-id/pocket-id/commit/ffb2ef91bd7bbe78eb29e86cd3675b695e821498) by @stonith404)

**Full Changelog**: https://github.com/pocket-id/pocket-id/compare/v1.16.0...v2.0.0

## v1.16.0

### Bug Fixes

- use `quoted-printable` encoding for mails to prevent line limitation ([5cf73e9](https://github.com/pocket-id/pocket-id/commit/5cf73e9309640d097ba94d97851cf502b7b2e063) by @stonith404)
- automatically create parent directory of Sqlite db ([cfc9e46](https://github.com/pocket-id/pocket-id/commit/cfc9e464d983b051e7ed4da1620fae61dc73cff2) by @stonith404)
- global audit log user filter not working ([d98c0a3](https://github.com/pocket-id/pocket-id/commit/d98c0a391a747f9eea70ea01c3f984264a4a7a19) by @stonith404)
- theme mode not correctly applied if selected manually ([a1cd325](https://github.com/pocket-id/pocket-id/commit/a1cd3251cd2b7d7aca610696ef338c5d01fdce2e) by @stonith404)
- hide theme switcher on auth pages because of dynamic background ([5d6a7fd](https://github.com/pocket-id/pocket-id/commit/5d6a7fdb58b6b82894dcb9be3b9fe6ca3e53f5fa) by @stonith404)

### Documentation

- add `ENCRYPTION_KEY` to `.env.example` for breaking change preparation ([4eeb06f](https://github.com/pocket-id/pocket-id/commit/4eeb06f29d984164939bf66299075efead87ee19) by @stonith404)

### Features

- light/dark/system mode switcher ([#1081](https://github.com/pocket-id/pocket-id/pull/1081) by @kmendell)
- add support for S3 storage backend ([#1080](https://github.com/pocket-id/pocket-id/pull/1080) by @stonith404)
- add support for WEBP profile pictures ([#1090](https://github.com/pocket-id/pocket-id/pull/1090) by @stonith404)
- add database storage backend ([#1091](https://github.com/pocket-id/pocket-id/pull/1091) by @ItalyPaleAle)
- adding/removing passkeys creates an entry in audit logs ([#1099](https://github.com/pocket-id/pocket-id/pull/1099) by @ItalyPaleAle)
- add option to disable S3 integrity check ([a3c9687](https://github.com/pocket-id/pocket-id/commit/a3c968758a17e95b2e55ae179d6601d8ec2cf052) by @stonith404)
- add `Cache-Control: private, no-store` to all API routes per default ([#1126](https://github.com/pocket-id/pocket-id/pull/1126) by @stonith404)

### Other

- update pnpm to 10.20 ([#1082](https://github.com/pocket-id/pocket-id/pull/1082) by @kmendell)
- run checks on PR to `breaking/**` branches ([ab9c0f9](https://github.com/pocket-id/pocket-id/commit/ab9c0f9ac092725c70ec3a963f57bc739f425d4f) by @stonith404)
- use constants for AppEnv values ([#1098](https://github.com/pocket-id/pocket-id/pull/1098) by @ItalyPaleAle)
- bump golang.org/x/crypto from 0.43.0 to 0.45.0 in /backend in the go_modules group across 1 directory ([#1107](https://github.com/pocket-id/pocket-id/pull/1107) by @dependabot[bot])
- add Finish files ([ca888b3](https://github.com/pocket-id/pocket-id/commit/ca888b3dd221a209df5e7beb749156f7ea21e1c0) by @stonith404)
- upgrade dependencies ([4bde271](https://github.com/pocket-id/pocket-id/commit/4bde271b4715f59bd2ed1f7c18a867daf0f26b8b) by @stonith404)
- fix Dutch validation message ([f523f39](https://github.com/pocket-id/pocket-id/commit/f523f39483a06256892d17dc02528ea009c87a9f) by @stonith404)
- fix package vulnerabilities ([3d46bad](https://github.com/pocket-id/pocket-id/commit/3d46badb3cecc1ee8eb8bfc9b377108be32d4ffc) by @stonith404)
- update vscode launch.json ([#1117](https://github.com/pocket-id/pocket-id/pull/1117) by @mnestor)
- rename file backend value `fs` to `filesystem` ([8d30346](https://github.com/pocket-id/pocket-id/commit/8d30346f642b483653f7a3dec006cb0273927afb) by @stonith404)
- fix wrong storage value ([b2c718d](https://github.com/pocket-id/pocket-id/commit/b2c718d13d12b6c152e19974d3490c2ed7f5d51d) by @stonith404)
- run formatter ([14c7471](https://github.com/pocket-id/pocket-id/commit/14c7471b5272cdaf42751701d842348d0d60cd0e) by @stonith404)

**Full Changelog**: https://github.com/pocket-id/pocket-id/compare/v1.15.0...v1.16.0

## v1.15.0

### Bug Fixes

- sorting by PKCE and re-auth of OIDC clients ([e03270e](https://github.com/pocket-id/pocket-id/commit/e03270eb9d474735ff4a1b4d8c90f1857b8cd52b) by @stonith404)
- replace %lang% placeholder in html lang ([#1071](https://github.com/pocket-id/pocket-id/pull/1071) by @daimond113)
- disabled property gets ignored when creating an user ([76e0192](https://github.com/pocket-id/pocket-id/commit/76e0192ceec339b6ddb4ad3424057d2bb48fae8f) by @stonith404)
- remove redundant indexes in Postgres ([6a038fc](https://github.com/pocket-id/pocket-id/commit/6a038fcf9afabbf00c45e42071e9bbe62ecab403) by @stonith404)

### Features

- open edit page on table row click ([f184120](https://github.com/pocket-id/pocket-id/commit/f184120890c32f1e75a918c171084878a10e8b42) by @stonith404)
- add ability to set default profile picture ([#1061](https://github.com/pocket-id/pocket-id/pull/1061) by @stonith404)

### Other

- add support for OpenBSD binaries ([d683d18](https://github.com/pocket-id/pocket-id/commit/d683d18d9109ca2850e278b78f7bf3e5aca1d34d) by @stonith404)

**Full Changelog**: https://github.com/pocket-id/pocket-id/compare/v1.14.2...v1.15.0

## v1.14.2

### Bug Fixes

- dark oidc client icons not saved on client creation ([#1057](https://github.com/pocket-id/pocket-id/pull/1057) by @mufeedali)

### Other

- add Turkish language files ([a190529](https://github.com/pocket-id/pocket-id/commit/a190529117fe20b5b836d452b382da69abba9458) by @stonith404)

**Full Changelog**: https://github.com/pocket-id/pocket-id/compare/v1.14.1...v1.14.2

## v1.14.1

### Bug Fixes

- Prevent blinding FOUC in dark mode ([#1054](https://github.com/pocket-id/pocket-id/pull/1054) by @mufeedali)
- use credProps to save passkey on firefox android ([#1055](https://github.com/pocket-id/pocket-id/pull/1055) by @lhoursquentin)
- ignore trailing slashes in `APP_URL` ([65616f6](https://github.com/pocket-id/pocket-id/commit/65616f65e53f3e62d18a8209929e68ddc8d2b9b8) by @stonith404)

**Full Changelog**: https://github.com/pocket-id/pocket-id/compare/v1.14.0...v1.14.1

## v1.14.0

### Bug Fixes

- ignore trailing slash in URL ([9f0aa55](https://github.com/pocket-id/pocket-id/commit/9f0aa55be67b7a09810569250563bb388b40590a) by @stonith404)
- use constant time comparisons when validating PKCE challenges ([#1047](https://github.com/pocket-id/pocket-id/pull/1047) by @ItalyPaleAle)
- only animate login background on initial page load ([b356cef](https://github.com/pocket-id/pocket-id/commit/b356cef766697c621157235ae1d2743f3fe6720d) by @stonith404)
- make pkce requirement visible in the oidc form if client is public ([47927d1](https://github.com/pocket-id/pocket-id/commit/47927d157470daa5b5a5b30e61a2ba69110eeff9) by @stonith404)
- prevent page flickering on redirection based on auth state ([10d6403](https://github.com/pocket-id/pocket-id/commit/10d640385ff2078299a07f05e5ca3f0d392eecf7) by @stonith404)

### Features

- add various improvements to the table component ([#961](https://github.com/pocket-id/pocket-id/pull/961) by @stonith404)
- add support for dark mode oidc client icons ([#1039](https://github.com/pocket-id/pocket-id/pull/1039) by @kmendell)

### Other

- add Japanese files ([068fcc6](https://github.com/pocket-id/pocket-id/commit/068fcc65a62c76f55c9636f830fc769bd59220c4) by @kmendell)
- bump sveltekit-superforms from 2.27.1 to 2.27.4 in the npm_and_yarn group across 1 directory ([#1031](https://github.com/pocket-id/pocket-id/pull/1031) by @dependabot[bot])
- update AAGUIDs ([#1041](https://github.com/pocket-id/pocket-id/pull/1041) by @github-actions[bot])
- bump vite from 7.0.7 to 7.0.8 in the npm_and_yarn group across 1 directory ([#1042](https://github.com/pocket-id/pocket-id/pull/1042) by @dependabot[bot])
- upgrade dependencies ([6362ff9](https://github.com/pocket-id/pocket-id/commit/6362ff986124d056cc07d214855f198eab9cb97d) by @stonith404)

**Full Changelog**: https://github.com/pocket-id/pocket-id/compare/v1.13.1...v1.14.0

## v1.13.1

### Bug Fixes

- uploading a client logo with an URL fails ([#1008](https://github.com/pocket-id/pocket-id/pull/1008) by @CzBiX)
- mark any callback url as valid if they contain a wildcard ([#1006](https://github.com/pocket-id/pocket-id/pull/1006) by @stonith404)

### Other

- cleanup root of repo, update workflow actions ([#1003](https://github.com/pocket-id/pocket-id/pull/1003) by @kmendell)

**Full Changelog**: https://github.com/pocket-id/pocket-id/compare/v1.13.0...v1.13.1

## v1.13.0

### Bug Fixes

- uploading a client logo with an URL fails if folder doesn't exist ([ad8a90c](https://github.com/pocket-id/pocket-id/commit/ad8a90c839cc79b542b60ae66c7eb9254fa5f3e4) by @stonith404)

### Features

- add link to API docs on API key page ([2c74865](https://github.com/pocket-id/pocket-id/commit/2c74865173344766bd43ffd6ae6d93d564de47c7) by @stonith404)

**Full Changelog**: https://github.com/pocket-id/pocket-id/compare/v1.12.0...v1.13.0

## v1.12.0

### Bug Fixes

@@ -4,7 +4,7 @@ Pocket ID is a simple OIDC provider that allows users to authenticate with their

→ Try out the [Demo](https://demo.pocket-id.org)

<img src="https://github.com/user-attachments/assets/96ac549d-b897-404a-8811-f42b16ea58e2" width="1200"/>
<img src="https://github.com/user-attachments/assets/1e99ba44-76da-4b47-9b8a-dbe9b7f84512" width="1200"/>

The goal of Pocket ID is to be a simple and easy-to-use. There are other self-hosted OIDC providers like [Keycloak](https://www.keycloak.org/) or [ORY Hydra](https://www.ory.sh/hydra/) but they are often too complex for simple use cases.

@@ -1,9 +1,12 @@
package main

import (
"fmt"
"os"
_ "time/tzdata"

"github.com/pocket-id/pocket-id/backend/internal/cmds"
"github.com/pocket-id/pocket-id/backend/internal/common"
)

// @title Pocket ID API

@@ -11,5 +14,9 @@ import (
// @description.markdown

func main() {
if err := common.ValidateEnvConfig(&common.EnvConfig); err != nil {
fmt.Fprintf(os.Stderr, "config error: %v\n", err)
os.Exit(1)
}
cmds.Execute()
}

@@ -32,10 +32,6 @@ func init() {
panic(fmt.Errorf("failed to read index.html: %w", iErr))
}

// Get the position of the first <script> tag
idx := bytes.Index(index, []byte(scriptTag))

// Create writeIndexFn, which adds the CSP tag to the script tag if needed
writeIndexFn = func(w io.Writer, nonce string) (err error) {
// If there's no nonce, write the index as-is
if nonce == "" {

@@ -43,23 +39,16 @@ func init() {
return err
}

// We have a nonce, so first write the index until the <script> tag
// Then we write the modified script tag
// Finally, the rest of the index
_, err = w.Write(index[0:idx])
if err != nil {
return err
}
_, err = w.Write([]byte(`<script nonce="` + nonce + `">`))
if err != nil {
return err
}
_, err = w.Write(index[(idx + len(scriptTag)):])
if err != nil {
return err
}
// Add nonce to all <script> tags
// We replace "<script" with `<script nonce="..."` everywhere it appears
modified := bytes.ReplaceAll(
index,
[]byte(scriptTag),
[]byte(`<script nonce="`+nonce+`">`),
)

return nil
_, err = w.Write(modified)
return err
}
}

@@ -75,6 +64,11 @@ func RegisterFrontend(router *gin.Engine) error {
router.NoRoute(func(c *gin.Context) {
path := strings.TrimPrefix(c.Request.URL.Path, "/")

if strings.HasSuffix(path, "/") {
c.Redirect(http.StatusMovedPermanently, strings.TrimRight(c.Request.URL.String(), "/"))
return
}

if strings.HasPrefix(path, "api/") {
c.JSON(http.StatusNotFound, gin.H{"error": "API endpoint not found"})
return

@@ -94,13 +88,9 @@ func RegisterFrontend(router *gin.Engine) error {
c.Header("Content-Type", "text/html; charset=utf-8")
c.Header("Cache-Control", "no-store")
c.Status(http.StatusOK)

err = writeIndexFn(c.Writer, nonce)
if err != nil {
if err := writeIndexFn(c.Writer, nonce); err != nil {
_ = c.Error(fmt.Errorf("failed to write index.html file: %w", err))
return
}

return
}

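The nonce handling in the hunk above replaces per-position splicing with a single `bytes.ReplaceAll` over the whole page. A self-contained sketch of that idea follows; it assumes `scriptTag` is the literal `"<script>"`, which is not shown in the diff.

```go
// Minimal illustration of the nonce-injection approach above; not the repository's code.
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// Assumed value; the real constant is defined outside the visible hunk.
const scriptTag = "<script>"

// writeIndex writes index to w, adding a CSP nonce to every <script> tag when one is provided.
func writeIndex(w io.Writer, index []byte, nonce string) error {
	if nonce == "" {
		// No nonce: write the document unchanged.
		_, err := w.Write(index)
		return err
	}
	// Replace every "<script>" with `<script nonce="...">` in a single pass.
	modified := bytes.ReplaceAll(
		index,
		[]byte(scriptTag),
		[]byte(`<script nonce="`+nonce+`">`),
	)
	_, err := w.Write(modified)
	return err
}

func main() {
	index := []byte(`<html><body><script>a()</script><script>b()</script></body></html>`)
	if err := writeIndex(os.Stdout, index, "abc123"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	fmt.Println()
}
```

Unlike the removed code, which spliced the nonce into only the first tag found by `bytes.Index`, this form rewrites every script tag in one pass.
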
backend/go.mod (196 changed lines)

@@ -3,88 +3,107 @@ module github.com/pocket-id/pocket-id/backend
go 1.25

require (
github.com/aws/aws-sdk-go-v2 v1.41.0
github.com/aws/aws-sdk-go-v2/config v1.32.6
github.com/aws/aws-sdk-go-v2/credentials v1.19.6
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0
github.com/aws/smithy-go v1.24.0
github.com/caarlos0/env/v11 v11.3.1
github.com/cenkalti/backoff/v5 v5.0.3
github.com/disintegration/imageorient v0.0.0-20180920195336-8147d86e83ec
github.com/disintegration/imaging v1.6.2
github.com/emersion/go-sasl v0.0.0-20200509203442-7bfe0ed36a21
github.com/emersion/go-smtp v0.21.3
github.com/fxamacker/cbor/v2 v2.9.0
github.com/gin-contrib/slog v1.1.0
github.com/gin-gonic/gin v1.10.1
github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6
github.com/emersion/go-smtp v0.24.0
github.com/gin-contrib/slog v1.2.0
github.com/gin-gonic/gin v1.11.0
github.com/glebarez/go-sqlite v1.22.0
github.com/glebarez/sqlite v1.11.0
github.com/go-co-op/gocron/v2 v2.16.3
github.com/go-ldap/ldap/v3 v3.4.10
github.com/go-playground/validator/v10 v10.27.0
github.com/go-webauthn/webauthn v0.11.2
github.com/golang-migrate/migrate/v4 v4.18.3
github.com/go-co-op/gocron/v2 v2.19.0
github.com/go-ldap/ldap/v3 v3.4.12
github.com/go-playground/validator/v10 v10.30.1
github.com/go-webauthn/webauthn v0.15.0
github.com/golang-migrate/migrate/v4 v4.19.1
github.com/google/uuid v1.6.0
github.com/hashicorp/go-uuid v1.0.3
github.com/jinzhu/copier v0.4.0
github.com/joho/godotenv v1.5.1
github.com/lestrrat-go/httprc/v3 v3.0.0
github.com/lestrrat-go/jwx/v3 v3.0.10
github.com/lestrrat-go/httprc/v3 v3.0.3
github.com/lestrrat-go/jwx/v3 v3.0.12
github.com/lmittmann/tint v1.1.2
github.com/mattn/go-isatty v0.0.20
github.com/mileusna/useragent v1.3.5
github.com/orandin/slog-gorm v1.4.0
github.com/oschwald/maxminddb-golang/v2 v2.0.0-beta.8
github.com/spf13/cobra v1.9.1
github.com/stretchr/testify v1.10.0
go.opentelemetry.io/contrib/bridges/otelslog v0.12.0
go.opentelemetry.io/contrib/exporters/autoexport v0.59.0
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.60.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0
go.opentelemetry.io/otel v1.37.0
go.opentelemetry.io/otel/log v0.13.0
go.opentelemetry.io/otel/metric v1.37.0
go.opentelemetry.io/otel/sdk v1.35.0
go.opentelemetry.io/otel/sdk/log v0.10.0
go.opentelemetry.io/otel/sdk/metric v1.35.0
go.opentelemetry.io/otel/trace v1.37.0
golang.org/x/crypto v0.41.0
golang.org/x/image v0.30.0
golang.org/x/sync v0.16.0
golang.org/x/text v0.28.0
golang.org/x/time v0.12.0
github.com/oschwald/maxminddb-golang/v2 v2.1.1
github.com/spf13/cobra v1.10.2
github.com/stretchr/testify v1.11.1
go.opentelemetry.io/contrib/bridges/otelslog v0.14.0
go.opentelemetry.io/contrib/exporters/autoexport v0.64.0
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.64.0
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0
go.opentelemetry.io/otel v1.39.0
go.opentelemetry.io/otel/log v0.15.0
go.opentelemetry.io/otel/metric v1.39.0
go.opentelemetry.io/otel/sdk v1.39.0
go.opentelemetry.io/otel/sdk/log v0.15.0
go.opentelemetry.io/otel/sdk/metric v1.39.0
go.opentelemetry.io/otel/trace v1.39.0
golang.org/x/crypto v0.46.0
golang.org/x/image v0.34.0
golang.org/x/sync v0.19.0
golang.org/x/text v0.32.0
golang.org/x/time v0.14.0
gorm.io/driver/postgres v1.6.0
gorm.io/gorm v1.30.1
gorm.io/gorm v1.31.1
)

require (
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/Azure/go-ntlmssp v0.1.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 // indirect
github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bytedance/sonic v1.14.0 // indirect
github.com/bytedance/sonic/loader v0.3.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/bytedance/gopkg v0.1.3 // indirect
github.com/bytedance/sonic v1.14.2 // indirect
github.com/bytedance/sonic/loader v0.4.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cloudwego/base64x v0.1.6 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/disintegration/gift v1.1.2 // indirect
github.com/disintegration/gift v1.2.1 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/gabriel-vasile/mimetype v1.4.9 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.12 // indirect
github.com/gin-contrib/sse v1.1.0 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-webauthn/x v0.1.23 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/go-webauthn/x v0.1.27 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/golang-jwt/jwt/v5 v5.2.3 // indirect
github.com/goccy/go-yaml v1.19.1 // indirect
github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
github.com/google/go-github/v39 v39.2.0 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/go-tpm v0.9.5 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/google/go-querystring v1.2.0 // indirect
github.com/google/go-tpm v0.9.8 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/pgx/v5 v5.7.5 // indirect
github.com/jackc/pgx/v5 v5.8.0 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect

@@ -93,57 +112,60 @@ require (
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/lestrrat-go/blackmagic v1.0.4 // indirect
github.com/lestrrat-go/dsig v1.0.0 // indirect
github.com/lestrrat-go/dsig-secp256k1 v1.0.0 // indirect
github.com/lestrrat-go/httpcc v1.0.1 // indirect
github.com/lestrrat-go/option v1.0.1 // indirect
github.com/lestrrat-go/option/v2 v2.0.0 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/mattn/go-sqlite3 v1.14.24 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mattn/go-sqlite3 v1.14.33 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/ncruces/go-strftime v1.0.0 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.22.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_golang v1.23.2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.67.4 // indirect
github.com/prometheus/otlptranslator v1.0.0 // indirect
github.com/prometheus/procfs v0.19.2 // indirect
github.com/quic-go/qpack v0.6.0 // indirect
github.com/quic-go/quic-go v0.58.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/segmentio/asm v1.2.0 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/segmentio/asm v1.2.1 // indirect
github.com/spf13/pflag v1.0.10 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.3.0 // indirect
github.com/valyala/fastjson v1.6.4 // indirect
github.com/ugorji/go/codec v1.3.1 // indirect
github.com/valyala/fastjson v1.6.7 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/bridges/prometheus v0.59.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.10.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.57.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.10.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
golang.org/x/arch v0.20.0 // indirect
golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 // indirect
golang.org/x/net v0.43.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sys v0.35.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect
google.golang.org/grpc v1.71.0 // indirect
google.golang.org/protobuf v1.36.7 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/bridges/prometheus v0.64.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.15.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.15.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/prometheus v0.61.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.15.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.39.0 // indirect
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
golang.org/x/arch v0.23.0 // indirect
golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93 // indirect
golang.org/x/net v0.48.0 // indirect
golang.org/x/oauth2 v0.34.0 // indirect
golang.org/x/sys v0.39.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect
google.golang.org/grpc v1.78.0 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
modernc.org/libc v1.66.7 // indirect
modernc.org/libc v1.67.4 // indirect
modernc.org/mathutil v1.7.1 // indirect
modernc.org/memory v1.11.0 // indirect
modernc.org/sqlite v1.38.2 // indirect
modernc.org/sqlite v1.42.2 // indirect
)

backend/go.sum (505 changed lines)

@@ -1,77 +1,121 @@
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8=
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
github.com/Azure/go-ntlmssp v0.1.0 h1:DjFo6YtWzNqNvQdrwEyr/e4nhU3vRiwenz5QX7sFz+A=
github.com/Azure/go-ntlmssp v0.1.0/go.mod h1:NYqdhxd/8aAct/s4qSYZEerdPuH1liG2/X9DiVTbhpk=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI=
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e h1:4dAU9FXIyQktpoUAgOJK3OTFc/xug0PCXYCqU0FgDKI=
github.com/alexbrainman/sspi v0.0.0-20250919150558-7d374ff0d59e/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4=
github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=
github.com/aws/aws-sdk-go-v2/config v1.32.6 h1:hFLBGUKjmLAekvi1evLi5hVvFQtSo3GYwi+Bx4lpJf8=
github.com/aws/aws-sdk-go-v2/config v1.32.6/go.mod h1:lcUL/gcd8WyjCrMnxez5OXkO3/rwcNmvfno62tnXNcI=
github.com/aws/aws-sdk-go-v2/credentials v1.19.6 h1:F9vWao2TwjV2MyiyVS+duza0NIRtAslgLUM0vTA1ZaE=
github.com/aws/aws-sdk-go-v2/credentials v1.19.6/go.mod h1:SgHzKjEVsdQr6Opor0ihgWtkWdfRAIwxYzSJ8O85VHY=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 h1:80+uETIWS1BqjnN9uJ0dBUaETh+P1XwFy5vwHwK5r9k=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16/go.mod h1:wOOsYuxYuB/7FlnVtzeBYRcjSRtQpAW0hCP7tIULMwo=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16 h1:CjMzUs78RDDv4ROu3JnJn/Ig1r6ZD7/T2DXLLRpejic=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.16/go.mod h1:uVW4OLBqbJXSHJYA9svT9BluSvvwbzLQ2Crf6UPzR3c=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7 h1:DIBqIrJ7hv+e4CmIk2z3pyKT+3B6qVMgRsawHiR3qso=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.7/go.mod h1:vLm00xmBke75UmpNvOcZQ/Q30ZFjbczeLFqGx5urmGo=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16 h1:NSbvS17MlI2lurYgXnCOLvCFX38sBW4eiVER7+kkgsU=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.16/go.mod h1:SwT8Tmqd4sA6G1qaGdzWCJN99bUmPGHfRwwq3G5Qb+A=
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0 h1:MIWra+MSq53CFaXXAywB2qg9YvVZifkk6vEGl/1Qor0=
github.com/aws/aws-sdk-go-v2/service/s3 v1.95.0/go.mod h1:79S2BdqCJpScXZA2y+cpZuocWsjGjJINyXnOsf5DTz8=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ=
github.com/aws/aws-sdk-go-v2/service/signin v1.0.4/go.mod h1:C5RdGMYGlfM0gYq/tifqgn4EbyX99V15P2V3R+VHbQU=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.8 h1:aM/Q24rIlS3bRAhTyFurowU8A0SMyGDtEOY/l/s/1Uw=
github.com/aws/aws-sdk-go-v2/service/sso v1.30.8/go.mod h1:+fWt2UHSb4kS7Pu8y+BMBvJF0EWx+4H0hzNwtDNRTrg=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 h1:AHDr0DaHIAo8c9t1emrzAlVDFp+iMMKnPdYy6XO4MCE=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12/go.mod h1:GQ73XawFFiWxyWXMHWfhiomvP3tXtdNar/fi8z18sx0=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70=
github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk=
github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk=
|
||||
github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ=
|
||||
github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA=
|
||||
github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA=
|
||||
github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
|
||||
github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
|
||||
github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM=
|
||||
github.com/bytedance/sonic v1.14.2 h1:k1twIoe97C1DtYUo+fZQy865IuHia4PR5RPiuGPPIIE=
|
||||
github.com/bytedance/sonic v1.14.2/go.mod h1:T80iDELeHiHKSc0C9tubFygiuXoGzrkjKzX2quAx980=
|
||||
github.com/bytedance/sonic/loader v0.4.0 h1:olZ7lEqcxtZygCK9EKYKADnpQoYkRQxaeY2NYzevs+o=
|
||||
github.com/bytedance/sonic/loader v0.4.0/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo=
|
||||
github.com/caarlos0/env/v11 v11.3.1 h1:cArPWC15hWmEt+gWk7YBi7lEXTXCvpaSdCiZE2X5mCA=
|
||||
github.com/caarlos0/env/v11 v11.3.1/go.mod h1:qupehSf/Y0TUTsxKywqRt/vJjN5nz6vauiYEUUr8P4U=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
|
||||
github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
|
||||
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
|
||||
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
|
||||
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
|
||||
github.com/dhui/dktest v0.4.5 h1:uUfYBIVREmj/Rw6MvgmqNAYzTiKOHJak+enB5Di73MM=
|
||||
github.com/dhui/dktest v0.4.5/go.mod h1:tmcyeHDKagvlDrz7gDKq4UAJOLIfVZYkfD5OnHDwcCo=
|
||||
github.com/disintegration/gift v1.1.2 h1:9ZyHJr+kPamiH10FX3Pynt1AxFUob812bU9Wt4GMzhs=
|
||||
github.com/dhui/dktest v0.4.6 h1:+DPKyScKSEp3VLtbMDHcUq6V5Lm5zfZZVb0Sk7Ahom4=
|
||||
github.com/dhui/dktest v0.4.6/go.mod h1:JHTSYDtKkvFNFHJKqCzVzqXecyv+tKt8EzceOmQOgbU=
|
||||
github.com/disintegration/gift v1.1.2/go.mod h1:Jh2i7f7Q2BM7Ezno3PhfezbR1xpUg9dUg3/RlKGr4HI=
|
||||
github.com/disintegration/gift v1.2.1 h1:Y005a1X4Z7Uc+0gLpSAsKhWi4qLtsdEcMIbbdvdZ6pc=
|
||||
github.com/disintegration/gift v1.2.1/go.mod h1:Jh2i7f7Q2BM7Ezno3PhfezbR1xpUg9dUg3/RlKGr4HI=
|
||||
github.com/disintegration/imageorient v0.0.0-20180920195336-8147d86e83ec h1:YrB6aVr9touOt75I9O1SiancmR2GMg45U9UYf0gtgWg=
|
||||
github.com/disintegration/imageorient v0.0.0-20180920195336-8147d86e83ec/go.mod h1:K0KBFIr1gWu/C1Gp10nFAcAE4hsB7JxE6OgLijrJ8Sk=
|
||||
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
|
||||
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4=
|
||||
github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
|
||||
github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/emersion/go-sasl v0.0.0-20200509203442-7bfe0ed36a21 h1:OJyUGMJTzHTd1XQp98QTaHernxMYzRaOasRir9hUlFQ=
|
||||
github.com/emersion/go-sasl v0.0.0-20200509203442-7bfe0ed36a21/go.mod h1:iL2twTeMvZnrg54ZoPDNfJaJaqy0xIQFuBdrLsmspwQ=
|
||||
github.com/emersion/go-smtp v0.21.3 h1:7uVwagE8iPYE48WhNsng3RRpCUpFvNl39JGNSIyGVMY=
|
||||
github.com/emersion/go-smtp v0.21.3/go.mod h1:qm27SGYgoIPRot6ubfQ/GpiPy/g3PaZAVRxiO/sDUgQ=
|
||||
github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6 h1:oP4q0fw+fOSWn3DfFi4EXdT+B+gTtzx8GC9xsc26Znk=
|
||||
github.com/emersion/go-sasl v0.0.0-20241020182733-b788ff22d5a6/go.mod h1:iL2twTeMvZnrg54ZoPDNfJaJaqy0xIQFuBdrLsmspwQ=
|
||||
github.com/emersion/go-smtp v0.24.0 h1:g6AfoF140mvW0vLNPD/LuCBLEAdlxOjIXqbIkJIS6Wk=
|
||||
github.com/emersion/go-smtp v0.24.0/go.mod h1:ZtRRkbTyp2XTHCA+BmyTFTrj8xY4I+b4McvHxCU2gsQ=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
|
||||
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
|
||||
github.com/gin-contrib/slog v1.1.0 h1:K9MVNrETT6r/C3u2Aheer/gxwVeVqrGL0hXlsmv3fm4=
|
||||
github.com/gin-contrib/slog v1.1.0/go.mod h1:PvNXQVXcVOAaaiJR84LV1/xlQHIaXi9ygEXyBkmjdkY=
|
||||
github.com/gabriel-vasile/mimetype v1.4.12 h1:e9hWvmLYvtp846tLHam2o++qitpguFiYCKbn0w9jyqw=
|
||||
github.com/gabriel-vasile/mimetype v1.4.12/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
|
||||
github.com/gin-contrib/slog v1.2.0 h1:vAxZfr7knD1ZYK5+pMJLP52sZXIkJXkcRPa/0dx9hSk=
|
||||
github.com/gin-contrib/slog v1.2.0/go.mod h1:vYK6YltmpsEFkO0zfRMLTKHrWS3DwUSn0TMpT+kMagI=
|
||||
github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w=
|
||||
github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM=
|
||||
github.com/gin-gonic/gin v1.10.1 h1:T0ujvqyCSqRopADpgPgiTT63DUQVSfojyME59Ei63pQ=
|
||||
github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
|
||||
github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk=
|
||||
github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls=
|
||||
github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ=
|
||||
github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc=
|
||||
github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw=
|
||||
github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-co-op/gocron/v2 v2.16.3 h1:kYqukZqBa8RC2+AFAHnunmKcs9GRTjwBo8WRF3I6cbI=
|
||||
github.com/go-co-op/gocron/v2 v2.16.3/go.mod h1:aTf7/+5Jo2E+cyAqq625UQ6DzpkV96b22VHIUAt6l3c=
|
||||
github.com/go-ldap/ldap/v3 v3.4.10 h1:ot/iwPOhfpNVgB1o+AVXljizWZ9JTp7YF5oeyONmcJU=
|
||||
github.com/go-ldap/ldap/v3 v3.4.10/go.mod h1:JXh4Uxgi40P6E9rdsYqpUtbW46D9UTjJ9QSwGRznplY=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo=
|
||||
github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
|
||||
github.com/go-co-op/gocron/v2 v2.19.0 h1:OKf2y6LXPs/BgBI2fl8PxUpNAI1DA9Mg+hSeGOS38OU=
|
||||
github.com/go-co-op/gocron/v2 v2.19.0/go.mod h1:5lEiCKk1oVJV39Zg7/YG10OnaVrDAV5GGR6O0663k6U=
|
||||
github.com/go-ldap/ldap/v3 v3.4.12 h1:1b81mv7MagXZ7+1r7cLTWmyuTqVqdwbtJSjC0DAp9s4=
|
||||
github.com/go-ldap/ldap/v3 v3.4.12/go.mod h1:+SPAGcTtOfmGsCb3h1RFiq4xpp4N636G75OEace8lNo=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
@@ -83,20 +127,24 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||
github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
|
||||
github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
|
||||
github.com/go-webauthn/webauthn v0.11.2 h1:Fgx0/wlmkClTKlnOsdOQ+K5HcHDsDcYIvtYmfhEOSUc=
|
||||
github.com/go-webauthn/webauthn v0.11.2/go.mod h1:aOtudaF94pM71g3jRwTYYwQTG1KyTILTcZqN1srkmD0=
|
||||
github.com/go-webauthn/x v0.1.23 h1:9lEO0s+g8iTyz5Vszlg/rXTGrx3CjcD0RZQ1GPZCaxI=
|
||||
github.com/go-webauthn/x v0.1.23/go.mod h1:AJd3hI7NfEp/4fI6T4CHD753u91l510lglU7/NMN6+E=
|
||||
github.com/go-playground/validator/v10 v10.30.1 h1:f3zDSN/zOma+w6+1Wswgd9fLkdwy06ntQJp0BBvFG0w=
|
||||
github.com/go-playground/validator/v10 v10.30.1/go.mod h1:oSuBIQzuJxL//3MelwSLD5hc2Tu889bF0Idm9Dg26cM=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/go-webauthn/webauthn v0.15.0 h1:LR1vPv62E0/6+sTenX35QrCmpMCzLeVAcnXeH4MrbJY=
|
||||
github.com/go-webauthn/webauthn v0.15.0/go.mod h1:hcAOhVChPRG7oqG7Xj6XKN1mb+8eXTGP/B7zBLzkX5A=
|
||||
github.com/go-webauthn/x v0.1.27 h1:CLyuB8JGn9xvw0etBl4fnclcbPTwhKpN4Xg32zaSYnI=
|
||||
github.com/go-webauthn/x v0.1.27/go.mod h1:KGYJQAPPgbpDKi4N7zKMGL+Iz6WgxKg3OlhVbPtuJXI=
|
||||
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
|
||||
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/goccy/go-yaml v1.19.1 h1:3rG3+v8pkhRqoQ/88NYNMHYVGYztCOCIZ7UQhu7H+NE=
|
||||
github.com/goccy/go-yaml v1.19.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.3 h1:kkGXqQOBSDDWRhWNXTFpqGSCMyh/PLnqUvMGJPDJDs0=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.3/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang-migrate/migrate/v4 v4.18.3 h1:EYGkoOsvgHHfm5U/naS1RP/6PL/Xv3S4B/swMiAmDLs=
|
||||
github.com/golang-migrate/migrate/v4 v4.18.3/go.mod h1:99BKpIi6ruaaXRM1A77eqZ+FWPQ3cfRa+ZVy5bmWMaY=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
|
||||
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||
github.com/golang-migrate/migrate/v4 v4.19.1 h1:OCyb44lFuQfYXYLx1SCxPZQGU7mcaZ7gH9yH4jSFbBA=
|
||||
github.com/golang-migrate/migrate/v4 v4.19.1/go.mod h1:CTcgfjxhaUtsLipnLoQRWCrjYXycRz/g5+RWDuYgPrE=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
@@ -108,35 +156,30 @@ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/go-github/v39 v39.2.0 h1:rNNM311XtPOz5rDdsJXAp2o8F67X9FnROXTvto3aSnQ=
|
||||
github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE=
|
||||
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
|
||||
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
|
||||
github.com/google/go-tpm v0.9.5 h1:ocUmnDebX54dnW+MQWGQRbdaAcJELsa6PqZhJ48KwVU=
|
||||
github.com/google/go-tpm v0.9.5/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
|
||||
github.com/google/go-querystring v1.2.0 h1:yhqkPbu2/OH+V9BfpCVPZkNmUXhb2gBxJArfhIxNtP0=
|
||||
github.com/google/go-querystring v1.2.0/go.mod h1:8IFJqpSRITyJ8QhQ13bmbeMBDfmeEJZD5A0egEOmkqU=
|
||||
github.com/google/go-tpm v0.9.8 h1:slArAR9Ft+1ybZu0lBwpSmpwhRXaa85hWtMinMyRAWo=
|
||||
github.com/google/go-tpm v0.9.8/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs=
|
||||
github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
|
||||
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 h1:kEISI/Gx67NzH3nJxAmY/dGac80kKZgZt134u7Y/k1s=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4/go.mod h1:6Nz966r3vQYCqIzWsuEl9d7cf7mRhtDmm++sOxlnfxI=
|
||||
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
|
||||
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs=
|
||||
github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
|
||||
github.com/jackc/pgx/v5 v5.8.0 h1:TYPDoleBBme0xGSAX3/+NujXXtpZn9HBONkQC7IEZSo=
|
||||
github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw=
|
||||
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
|
||||
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
|
||||
@@ -177,14 +220,16 @@ github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||
github.com/lestrrat-go/blackmagic v1.0.4 h1:IwQibdnf8l2KoO+qC3uT4OaTWsW7tuRQXy9TRN9QanA=
|
||||
github.com/lestrrat-go/blackmagic v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw=
|
||||
github.com/lestrrat-go/dsig v1.0.0 h1:OE09s2r9Z81kxzJYRn07TFM9XA4akrUdoMwr0L8xj38=
|
||||
github.com/lestrrat-go/dsig v1.0.0/go.mod h1:dEgoOYYEJvW6XGbLasr8TFcAxoWrKlbQvmJgCR0qkDo=
|
||||
github.com/lestrrat-go/dsig-secp256k1 v1.0.0 h1:JpDe4Aybfl0soBvoVwjqDbp+9S1Y2OM7gcrVVMFPOzY=
|
||||
github.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU=
|
||||
github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE=
|
||||
github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E=
|
||||
github.com/lestrrat-go/httprc/v3 v3.0.0 h1:nZUx/zFg5uc2rhlu1L1DidGr5Sj02JbXvGSpnY4LMrc=
|
||||
github.com/lestrrat-go/httprc/v3 v3.0.0/go.mod h1:k2U1QIiyVqAKtkffbg+cUmsyiPGQsb9aAfNQiNFuQ9Q=
|
||||
github.com/lestrrat-go/jwx/v3 v3.0.10 h1:XuoCBhZBncRIjMQ32HdEc76rH0xK/Qv2wq5TBouYJDw=
|
||||
github.com/lestrrat-go/jwx/v3 v3.0.10/go.mod h1:kNMedLgTpHvPJkK5EMVa1JFz+UVyY2dMmZKu3qjl/Pk=
|
||||
github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU=
|
||||
github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
|
||||
github.com/lestrrat-go/httprc/v3 v3.0.3 h1:WjLHWkDkgWXeIUrKi/7lS/sGq2DjkSAwdTbH5RHXAKs=
|
||||
github.com/lestrrat-go/httprc/v3 v3.0.3/go.mod h1:mSMtkZW92Z98M5YoNNztbRGxbXHql7tSitCvaxvo9l0=
|
||||
github.com/lestrrat-go/jwx/v3 v3.0.12 h1:p25r68Y4KrbBdYjIsQweYxq794CtGCzcrc5dGzJIRjg=
|
||||
github.com/lestrrat-go/jwx/v3 v3.0.12/go.mod h1:HiUSaNmMLXgZ08OmGBaPVvoZQgJVOQphSrGr5zMamS8=
|
||||
github.com/lestrrat-go/option/v2 v2.0.0 h1:XxrcaJESE1fokHy3FpaQ/cXW8ZsIdWcdFzzLOcID3Ss=
|
||||
github.com/lestrrat-go/option/v2 v2.0.0/go.mod h1:oSySsmzMoR0iRzCDCaUfsCzxQHUEuhOViQObyy7S6Vg=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
@@ -193,12 +238,10 @@ github.com/lmittmann/tint v1.1.2 h1:2CQzrL6rslrsyjqLDwD11bZ5OpLBPU+g3G/r5LSfS8w=
|
||||
github.com/lmittmann/tint v1.1.2/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
|
||||
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.33 h1:A5blZ5ulQo2AtayQ9/limgHEkFreKj1Dv226a1K73s0=
|
||||
github.com/mattn/go-sqlite3 v1.14.33/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/mileusna/useragent v1.3.5 h1:SJM5NzBmh/hO+4LGeATKpaEX9+b4vcGg2qXGLiNGDws=
|
||||
github.com/mileusna/useragent v1.3.5/go.mod h1:3d8TOmwL/5I8pJjyVDteHtgDGcefrFUX4ccGOMKNYYc=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
@@ -212,247 +255,207 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
|
||||
github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w=
|
||||
github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/orandin/slog-gorm v1.4.0 h1:FgA8hJufF9/jeNSYoEXmHPPBwET2gwlF3B85JdpsTUU=
|
||||
github.com/orandin/slog-gorm v1.4.0/go.mod h1:MoZ51+b7xE9lwGNPYEhxcUtRNrYzjdcKvA8QXQQGEPA=
|
||||
github.com/oschwald/maxminddb-golang/v2 v2.0.0-beta.8 h1:aM1/rO6p+XV+l+seD7UCtFZgsOefDTrFVLvPoZWjXZs=
|
||||
github.com/oschwald/maxminddb-golang/v2 v2.0.0-beta.8/go.mod h1:Jts8ztuE0PkUwY7VCJyp6B68ujQfr6G9P5Dn3Yx9u6w=
|
||||
github.com/oschwald/maxminddb-golang/v2 v2.1.1 h1:lA8FH0oOrM4u7mLvowq8IT6a3Q/qEnqRzLQn9eH5ojc=
|
||||
github.com/oschwald/maxminddb-golang/v2 v2.1.1/go.mod h1:PLdx6PR+siSIoXqqy7C7r3SB3KZnhxWr1Dp6g0Hacl8=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
|
||||
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
|
||||
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
|
||||
github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
|
||||
github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos=
|
||||
github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM=
|
||||
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
|
||||
github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
|
||||
github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
|
||||
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
|
||||
github.com/quic-go/quic-go v0.58.0 h1:ggY2pvZaVdB9EyojxL1p+5mptkuHyX5MOSv4dgWF4Ug=
|
||||
github.com/quic-go/quic-go v0.58.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
|
||||
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
|
||||
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
|
||||
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
|
||||
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/segmentio/asm v1.2.1 h1:DTNbBqs57ioxAD4PrArqftgypG4/qNpXoJx8TVXxPR0=
|
||||
github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
|
||||
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
|
||||
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA=
|
||||
github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
|
||||
github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
|
||||
github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
|
||||
github.com/ugorji/go/codec v1.3.1 h1:waO7eEiFDwidsBN6agj1vJQ4AG7lh2yqXyOXqhgQuyY=
|
||||
github.com/ugorji/go/codec v1.3.1/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
|
||||
github.com/valyala/fastjson v1.6.7 h1:ZE4tRy0CIkh+qDc5McjatheGX2czdn8slQjomexVpBM=
|
||||
github.com/valyala/fastjson v1.6.7/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/bridges/otelslog v0.12.0 h1:lFM7SZo8Ce01RzRfnUFQZEYeWRf/MtOA3A5MobOqk2g=
|
||||
go.opentelemetry.io/contrib/bridges/otelslog v0.12.0/go.mod h1:Dw05mhFtrKAYu72Tkb3YBYeQpRUJ4quDgo2DQw3No5A=
|
||||
go.opentelemetry.io/contrib/bridges/prometheus v0.59.0 h1:HY2hJ7yn3KuEBBBsKxvF3ViSmzLwsgeNvD+0utRMgzc=
|
||||
go.opentelemetry.io/contrib/bridges/prometheus v0.59.0/go.mod h1:H4H7vs8766kwFnOZVEGMJFVF+phpBSmTckvvNRdJeDI=
|
||||
go.opentelemetry.io/contrib/exporters/autoexport v0.59.0 h1:dKhAFwh7SSoOw+gwMtSv+XLkUGTFAwAGMT3X3XSE4FA=
|
||||
go.opentelemetry.io/contrib/exporters/autoexport v0.59.0/go.mod h1:fPl+qlrhRdRntIpPs9JoQ0iBKAsnH5VkgppU1f9kyF4=
|
||||
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.60.0 h1:jj/B7eX95/mOxim9g9laNZkOHKz/XCHG0G410SntRy4=
|
||||
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.60.0/go.mod h1:ZvRTVaYYGypytG0zRp2A60lpj//cMq3ZnxYdZaljVBM=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
|
||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0 h1:5dTKu4I5Dn4P2hxyW3l3jTaZx9ACgg0ECos1eAVrheY=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.10.0/go.mod h1:P5HcUI8obLrCCmM3sbVBohZFH34iszk/+CPWuakZWL8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.10.0 h1:q/heq5Zh8xV1+7GoMGJpTxM2Lhq5+bFxB29tshuRuw0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.10.0/go.mod h1:leO2CSTg0Y+LyvmR7Wm4pUxE8KAmaM2GCVx7O+RATLA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 h1:QcFwRrZLc82r8wODjvyCbP7Ifp3UANaBSmhDSFjnqSc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0/go.mod h1:CXIWhUomyWBG/oY2/r/kLp6K/cmx9e/7DLpBuuGdLCA=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 h1:0NIXxOCFx+SKbhCVxwl3ETG8ClLPAa0KuKV6p3yhxP8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0/go.mod h1:ChZSJbbfbl/DcRZNc9Gqh6DYGlfjw4PvO1pEOZH1ZsE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.57.0 h1:AHh/lAP1BHrY5gBwk8ncc25FXWm/gmmY3BX258z5nuk=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.57.0/go.mod h1:QpFWz1QxqevfjwzYdbMb4Y1NnlJvqSGwyuU0B4iuc9c=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.10.0 h1:GKCEAZLEpEf78cUvudQdTg0aET2ObOZRB2HtXA0qPAI=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.10.0/go.mod h1:9/zqSWLCmHT/9Jo6fYeUDRRogOLL60ABLsHWS99lF8s=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 h1:PB3Zrjs1sG1GBX51SXyTSoOTqcDglmsk7nT6tkKPb/k=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0/go.mod h1:U2R3XyVPzn0WX7wOIypPuptulsMcPDPs/oiSVOMVnHY=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0 h1:T0Ec2E+3YZf5bgTNQVet8iTDW7oIk03tXHq+wkwIDnE=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0/go.mod h1:30v2gqH+vYGJsesLWFov8u47EpYTcIQcBjKpI6pJThg=
|
||||
go.opentelemetry.io/otel/log v0.13.0 h1:yoxRoIZcohB6Xf0lNv9QIyCzQvrtGZklVbdCoyb7dls=
|
||||
go.opentelemetry.io/otel/log v0.13.0/go.mod h1:INKfG4k1O9CL25BaM1qLe0zIedOpvlS5Z7XgSbmN83E=
|
||||
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
|
||||
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
|
||||
go.opentelemetry.io/otel/sdk/log v0.10.0 h1:lR4teQGWfeDVGoute6l0Ou+RpFqQ9vaPdrNJlST0bvw=
|
||||
go.opentelemetry.io/otel/sdk/log v0.10.0/go.mod h1:A+V1UTWREhWAittaQEG4bYm4gAZa6xnvVu+xKrIRkzo=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
|
||||
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
|
||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/bridges/otelslog v0.14.0 h1:eypSOd+0txRKCXPNyqLPsbSfA0jULgJcGmSAdFAnrCM=
|
||||
go.opentelemetry.io/contrib/bridges/otelslog v0.14.0/go.mod h1:CRGvIBL/aAxpQU34ZxyQVFlovVcp67s4cAmQu8Jh9mc=
|
||||
go.opentelemetry.io/contrib/bridges/prometheus v0.64.0 h1:7TYhBCu6Xz6vDJGNtEslWZLuuX2IJ/aH50hBY4MVeUg=
|
||||
go.opentelemetry.io/contrib/bridges/prometheus v0.64.0/go.mod h1:tHQctZfAe7e4PBPGyt3kae6mQFXNpj+iiDJa3ithM50=
|
||||
go.opentelemetry.io/contrib/exporters/autoexport v0.64.0 h1:9pzPj3RFyKOxBAMkM2w84LpT+rdHam1XoFA+QhARiRw=
|
||||
go.opentelemetry.io/contrib/exporters/autoexport v0.64.0/go.mod h1:hlVZx1btWH0XTfXpuGX9dsquB50s+tc3fYFOO5elo2M=
|
||||
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.64.0 h1:7IKZbAYwlwLXAdu7SVPhzTjDjogWZxP4MIa7rovY+PU=
|
||||
go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.64.0/go.mod h1:+TF5nf3NIv2X8PGxqfYOaRnAoMM43rUA2C3XsN2DoWA=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
|
||||
go.opentelemetry.io/contrib/propagators/b3 v1.39.0 h1:PI7pt9pkSnimWcp5sQhUA9OzLbc3Ba4sL+VEUTNsxrk=
|
||||
go.opentelemetry.io/contrib/propagators/b3 v1.39.0/go.mod h1:5gV/EzPnfYIwjzj+6y8tbGW2PKWhcsz5e/7twptRVQY=
|
||||
go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
|
||||
go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.15.0 h1:W+m0g+/6v3pa5PgVf2xoFMi5YtNR06WtS7ve5pcvLtM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.15.0/go.mod h1:JM31r0GGZ/GU94mX8hN4D8v6e40aFlUECSQ48HaLgHM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.15.0 h1:EKpiGphOYq3CYnIe2eX9ftUkyU+Y8Dtte8OaWyHJ4+I=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.15.0/go.mod h1:nWFP7C+T8TygkTjJ7mAyEaFaE7wNfms3nV/vexZ6qt0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0 h1:cEf8jF6WbuGQWUVcqgyWtTR0kOOAWY1DYZ+UhvdmQPw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0/go.mod h1:k1lzV5n5U3HkGvTCJHraTAGJ7MqsgL1wrGwTj1Isfiw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.39.0 h1:nKP4Z2ejtHn3yShBb+2KawiXgpn8In5cT7aO2wXuOTE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.39.0/go.mod h1:NwjeBbNigsO4Aj9WgM0C+cKIrxsZUaRmZUO7A8I7u8o=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 h1:in9O8ESIOlwJAEGTkkf34DesGRAc/Pn8qJ7k3r/42LM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0/go.mod h1:Rp0EXBm5tfnv0WL+ARyO/PHBEaEAT8UUHQ6AGJcSq6c=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0 h1:Ckwye2FpXkYgiHX7fyVrN1uA/UYd9ounqqTuSNAv0k4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0/go.mod h1:teIFJh5pW2y+AN7riv6IBPX2DuesS3HgP39mwOspKwU=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.61.0 h1:cCyZS4dr67d30uDyh8etKM2QyDsQ4zC9ds3bdbrVoD0=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.61.0/go.mod h1:iivMuj3xpR2DkUrUya3TPS/Z9h3dz7h01GxU+fQBRNg=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.15.0 h1:0BSddrtQqLEylcErkeFrJBmwFzcqfQq9+/uxfTZq+HE=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.15.0/go.mod h1:87sjYuAPzaRCtdd09GU5gM1U9wQLrrcYrm77mh5EBoc=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0 h1:5gn2urDL/FBnK8OkCfD1j3/ER79rUuTYmCvlXBKeYL8=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0/go.mod h1:0fBG6ZJxhqByfFZDwSwpZGzJU671HkwpWaNe2t4VUPI=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.39.0 h1:8UPA4IbVZxpsD76ihGOQiFml99GPAEZLohDXvqHdi6U=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.39.0/go.mod h1:MZ1T/+51uIVKlRzGw1Fo46KEWThjlCBZKl2LzY5nv4g=
|
||||
go.opentelemetry.io/otel/log v0.15.0 h1:0VqVnc3MgyYd7QqNVIldC3dsLFKgazR6P3P3+ypkyDY=
|
||||
go.opentelemetry.io/otel/log v0.15.0/go.mod h1:9c/G1zbyZfgu1HmQD7Qj84QMmwTp2QCQsZH1aeoWDE4=
|
||||
go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
|
||||
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
|
||||
go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18=
|
||||
go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE=
|
||||
go.opentelemetry.io/otel/sdk/log v0.15.0 h1:WgMEHOUt5gjJE93yqfqJOkRflApNif84kxoHWS9VVHE=
|
||||
go.opentelemetry.io/otel/sdk/log v0.15.0/go.mod h1:qDC/FlKQCXfH5hokGsNg9aUBGMJQsrUyeOiW5u+dKBQ=
|
||||
go.opentelemetry.io/otel/sdk/log/logtest v0.14.0 h1:Ijbtz+JKXl8T2MngiwqBlPaHqc4YCaP/i13Qrow6gAM=
|
||||
go.opentelemetry.io/otel/sdk/log/logtest v0.14.0/go.mod h1:dCU8aEL6q+L9cYTqcVOk8rM9Tp8WdnHOPLiBgp0SGOA=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew=
|
||||
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
|
||||
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
|
||||
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
|
||||
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c=
|
||||
golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
|
||||
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
|
||||
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
|
||||
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
||||
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/arch v0.23.0 h1:lKF64A2jF6Zd8L0knGltUnegD62JMFBiCPBmQpToHhg=
|
||||
golang.org/x/arch v0.23.0/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
|
||||
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
|
||||
golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 h1:SbTAbRFnd5kjQXbczszQ0hdk3ctwYf3qBNH9jIsGclE=
|
||||
golang.org/x/exp v0.0.0-20250813145105-42675adae3e6/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
|
||||
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
|
||||
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
|
||||
golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93 h1:fQsdNF2N+/YewlRZiricy4P1iimyPKZ/xwniHj8Q2a0=
|
||||
golang.org/x/exp v0.0.0-20251219203646-944ab1f22d93/go.mod h1:EPRbTFwzwjXj9NpYyyrvenVh9Y+GFeEvMNh7Xuz7xgU=
|
||||
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/image v0.30.0 h1:jD5RhkmVAnjqaCUXfbGBrn3lpxbknfN9w2UhHHU+5B4=
|
||||
golang.org/x/image v0.30.0/go.mod h1:SAEUTxCCMWSrJcCy/4HwavEsfZZJlYxeHLc6tTiAe/c=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/image v0.34.0 h1:33gCkyw9hmwbZJeZkct8XyR11yH889EQt/QH4VmXMn8=
|
||||
golang.org/x/image v0.34.0/go.mod h1:2RNFBZRB+vnwwFil8GkMdRvrJOFd1AzdZI6vOY+eJVU=
|
||||
golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
|
||||
golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
|
||||
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
|
||||
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
|
||||
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
|
||||
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
|
||||
golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
|
||||
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
|
||||
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
|
||||
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
|
||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
|
||||
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
|
||||
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
|
||||
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
|
||||
golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ=
|
||||
google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg=
|
||||
google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
|
||||
google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
|
||||
google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b h1:uA40e2M6fYRBf0+8uN5mLlqUtV192iiksiICIBkYJ1E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:Xa7le7qx2vmqB/SzWUBa7KdMjpdpAHlh5QCSnjessQk=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
||||
google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
|
||||
google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
|
||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
||||
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4=
|
||||
gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo=
|
||||
gorm.io/gorm v1.30.1 h1:lSHg33jJTBxs2mgJRfRZeLDG+WZaHYCk3Wtfl6Ngzo4=
|
||||
gorm.io/gorm v1.30.1/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
|
||||
modernc.org/cc/v4 v4.26.3 h1:yEN8dzrkRFnn4PUUKXLYIqVf2PJYAEjMTFjO3BDGc3I=
|
||||
modernc.org/cc/v4 v4.26.3/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.28.0 h1:rjznn6WWehKq7dG4JtLRKxb52Ecv8OUGah8+Z/SfpNU=
|
||||
modernc.org/ccgo/v4 v4.28.0/go.mod h1:JygV3+9AV6SmPhDasu4JgquwU81XAKLd3OKTUDNOiKE=
|
||||
modernc.org/fileutil v1.3.15 h1:rJAXTP6ilMW/1+kzDiqmBlHLWszheUFXIyGQIAvjJpY=
|
||||
modernc.org/fileutil v1.3.15/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
||||
gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg=
|
||||
gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
|
||||
modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis=
|
||||
modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0=
|
||||
modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc=
|
||||
modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM=
|
||||
modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA=
|
||||
modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
|
||||
modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI=
|
||||
modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito=
|
||||
modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE=
|
||||
modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY=
|
||||
modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks=
|
||||
modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI=
|
||||
modernc.org/libc v1.66.7 h1:rjhZ8OSCybKWxS1CJr0hikpEi6Vg+944Ouyrd+bQsoY=
|
||||
modernc.org/libc v1.66.7/go.mod h1:ln6tbWX0NH+mzApEoDRvilBvAWFt1HX7AUA4VDdVDPM=
|
||||
modernc.org/libc v1.67.4 h1:zZGmCMUVPORtKv95c2ReQN5VDjvkoRm9GWPTEPuvlWg=
|
||||
modernc.org/libc v1.67.4/go.mod h1:QvvnnJ5P7aitu0ReNpVIEyesuhmDLQ8kaEoyMjIFZJA=
|
||||
modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU=
|
||||
modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg=
|
||||
modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI=
|
||||
@@ -461,8 +464,8 @@ modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8=
|
||||
modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
|
||||
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
|
||||
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
|
||||
modernc.org/sqlite v1.38.2 h1:Aclu7+tgjgcQVShZqim41Bbw9Cho0y/7WzYptXqkEek=
|
||||
modernc.org/sqlite v1.38.2/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E=
|
||||
modernc.org/sqlite v1.42.2 h1:7hkZUNJvJFN2PgfUdjni9Kbvd4ef4mNLOu0B9FGxM74=
|
||||
modernc.org/sqlite v1.42.2/go.mod h1:+VkC6v3pLOAE0A0uVucQEcbVW0I5nHCeDaBf+DpsQT8=
|
||||
modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0=
|
||||
modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A=
|
||||
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
|
||||
|
||||
@@ -2,68 +2,77 @@ package bootstrap

import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"io/fs"
"log/slog"
"os"
"path"

"github.com/pocket-id/pocket-id/backend/internal/common"
"github.com/pocket-id/pocket-id/backend/internal/storage"
"github.com/pocket-id/pocket-id/backend/internal/utils"
"github.com/pocket-id/pocket-id/backend/resources"
)

// initApplicationImages copies the images from the images directory to the application-images directory
// initApplicationImages copies the images from the embedded directory to the storage backend
// and returns a map containing the detected file extensions in the application-images directory.
func initApplicationImages() (map[string]string, error) {
func initApplicationImages(ctx context.Context, fileStorage storage.FileStorage) (map[string]string, error) {
// Previous versions of images
// If these are found, they are deleted
legacyImageHashes := imageHashMap{
"background.jpg": mustDecodeHex("138d510030ed845d1d74de34658acabff562d306476454369a60ab8ade31933f"),
"background.jpg": mustDecodeHex("138d510030ed845d1d74de34658acabff562d306476454369a60ab8ade31933f"),
"background.webp": mustDecodeHex("3fc436a66d6b872b01d96a4e75046c46b5c3e2daccd51e98ecdf98fd445599ab"),
}

dirPath := common.EnvConfig.UploadPath + "/application-images"

sourceFiles, err := resources.FS.ReadDir("images")
if err != nil && !os.IsNotExist(err) {
return nil, fmt.Errorf("failed to read directory: %w", err)
}

destinationFiles, err := os.ReadDir(dirPath)
if err != nil && !os.IsNotExist(err) {
return nil, fmt.Errorf("failed to read directory: %w", err)
destinationFiles, err := fileStorage.List(ctx, "application-images")
if err != nil {
if storage.IsNotExist(err) {
destinationFiles = []storage.ObjectInfo{}
} else {
return nil, fmt.Errorf("failed to list application images: %w", err)
}

}
dstNameToExt := make(map[string]string, len(destinationFiles))
for _, f := range destinationFiles {
if f.IsDir() {
continue
}
name := f.Name()
nameWithoutExt, ext := utils.SplitFileName(name)
destFilePath := path.Join(dirPath, name)

// Skip directories
if f.IsDir() {
_, name := path.Split(f.Path)
if name == "" {
continue
}

h, err := utils.CreateSha256FileHash(destFilePath)
nameWithoutExt, ext := utils.SplitFileName(name)
reader, _, err := fileStorage.Open(ctx, f.Path)
if err != nil {
slog.Warn("Failed to get hash for file", slog.String("name", name), slog.Any("error", err))
if errors.Is(err, fs.ErrNotExist) {
continue
}
slog.Warn("Failed to open application image for hashing", slog.String("name", name), slog.Any("error", err))
continue
}
hash, err := hashStream(reader)
reader.Close()
if err != nil {
slog.Warn("Failed to hash application image", slog.String("name", name), slog.Any("error", err))
continue
}

// Check if the file is a legacy one - if so, delete it
if legacyImageHashes.Contains(h) {
if legacyImageHashes.Contains(hash) {
slog.Info("Found legacy application image that will be removed", slog.String("name", name))
err = os.Remove(destFilePath)
if err != nil {
if err := fileStorage.Delete(ctx, f.Path); err != nil {
return nil, fmt.Errorf("failed to remove legacy file '%s': %w", name, err)
}
continue
}

// Track existing files
dstNameToExt[nameWithoutExt] = ext
}

@@ -76,21 +85,21 @@ func initApplicationImages() (map[string]string, error) {
name := sourceFile.Name()
nameWithoutExt, ext := utils.SplitFileName(name)
srcFilePath := path.Join("images", name)
destFilePath := path.Join(dirPath, name)

// Skip if there's already an image at the path
// We do not check the extension because users could have uploaded a different one
if _, exists := dstNameToExt[nameWithoutExt]; exists {
continue
}

slog.Info("Writing new application image", slog.String("name", name))
err := utils.CopyEmbeddedFileToDisk(srcFilePath, destFilePath)
srcFile, err := resources.FS.Open(srcFilePath)
if err != nil {
return nil, fmt.Errorf("failed to copy file: %w", err)
return nil, fmt.Errorf("failed to open embedded file '%s': %w", name, err)
}

// Track the newly copied file so it can be included in the extensions map later
if err := fileStorage.Save(ctx, path.Join("application-images", name), srcFile); err != nil {
srcFile.Close()
return nil, fmt.Errorf("failed to store application image '%s': %w", name, err)
}
srcFile.Close()
dstNameToExt[nameWithoutExt] = ext
}

@@ -118,3 +127,11 @@ func mustDecodeHex(str string) []byte {
}
return b
}

func hashStream(r io.Reader) ([]byte, error) {
h := sha256.New()
if _, err := io.Copy(h, r); err != nil {
return nil, err
}
return h.Sum(nil), nil
}
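Aside: the definition of storage.FileStorage is not included in this diff. The sketch below is only the interface shape implied by the calls above (List, Open, Save, Delete, plus storage.IsNotExist and storage.ObjectInfo); the second return value of Open and the exact ObjectInfo layout are assumptions, not the real declaration.

// Sketch only - inferred from the call sites in this diff, not the actual definition.
type ObjectInfo struct {
	Path string // e.g. "application-images/background.webp"
	// The code above also calls IsDir(), so ObjectInfo exposes a directory check as well.
}

type FileStorage interface {
	// List returns the objects stored under the given prefix.
	List(ctx context.Context, prefix string) ([]ObjectInfo, error)
	// Open returns a reader for the object; the middle return value
	// (presumably size or metadata) is ignored by the caller above.
	Open(ctx context.Context, path string) (io.ReadCloser, int64, error)
	// Save writes the contents of r to the given path.
	Save(ctx context.Context, path string, r io.Reader) error
	// Delete removes the object at the given path.
	Delete(ctx context.Context, path string) error
}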
@@ -7,13 +7,25 @@ import (
|
||||
"time"
|
||||
|
||||
_ "github.com/golang-migrate/migrate/v4/source/file"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/pocket-id/pocket-id/backend/internal/common"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/job"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/storage"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/utils"
|
||||
)
|
||||
|
||||
func Bootstrap(ctx context.Context) error {
|
||||
var shutdownFns []utils.Service
|
||||
defer func() { //nolint:contextcheck
|
||||
// Invoke all shutdown functions on exit
|
||||
shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
if err := utils.NewServiceRunner(shutdownFns...).Run(shutdownCtx); err != nil {
|
||||
slog.Error("Error during graceful shutdown", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Initialize the observability stack, including the logger, distributed tracing, and metrics
|
||||
shutdownFns, httpClient, err := initObservability(ctx, common.EnvConfig.MetricsEnabled, common.EnvConfig.TracingEnabled)
|
||||
if err != nil {
|
||||
@@ -21,56 +33,104 @@ func Bootstrap(ctx context.Context) error {
|
||||
}
|
||||
slog.InfoContext(ctx, "Pocket ID is starting")
|
||||
|
||||
imageExtensions, err := initApplicationImages()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize application images: %w", err)
|
||||
}
|
||||
|
||||
// Connect to the database
|
||||
db, err := NewDatabase()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize database: %w", err)
|
||||
}
|
||||
|
||||
// Create all services
|
||||
svc, err := initServices(ctx, db, httpClient, imageExtensions)
|
||||
fileStorage, err := InitStorage(ctx, db)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize services: %w", err)
|
||||
return fmt.Errorf("failed to initialize file storage (backend: %s): %w", common.EnvConfig.FileBackend, err)
|
||||
}
|
||||
|
||||
imageExtensions, err := initApplicationImages(ctx, fileStorage)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize application images: %w", err)
|
||||
}
|
||||
|
||||
// Init the job scheduler
|
||||
scheduler, err := job.NewScheduler()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create job scheduler: %w", err)
|
||||
}
|
||||
|
||||
// Create all services
|
||||
svc, err := initServices(ctx, db, httpClient, imageExtensions, fileStorage, scheduler)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize services: %w", err)
|
||||
}
|
||||
|
||||
waitUntil, err := svc.appLockService.Acquire(ctx, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to acquire application lock: %w", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-time.After(time.Until(waitUntil)):
|
||||
}
|
||||
|
||||
shutdownFn := func(shutdownCtx context.Context) error {
|
||||
sErr := svc.appLockService.Release(shutdownCtx)
|
||||
if sErr != nil {
|
||||
return fmt.Errorf("failed to release application lock: %w", sErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
shutdownFns = append(shutdownFns, shutdownFn)
|
||||
|
||||
// Register scheduled jobs
|
||||
err = registerScheduledJobs(ctx, db, svc, httpClient, scheduler)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to register scheduled jobs: %w", err)
|
||||
}
|
||||
|
||||
// Init the router
|
||||
router := initRouter(db, svc)
|
||||
router, err := initRouter(db, svc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize router: %w", err)
|
||||
}
|
||||
|
||||
// Run all background services
|
||||
// This call blocks until the context is canceled
|
||||
err = utils.
|
||||
NewServiceRunner(router, scheduler.Run).
|
||||
Run(ctx)
|
||||
services := []utils.Service{svc.appLockService.RunRenewal, router}
|
||||
|
||||
if common.EnvConfig.AppEnv != "test" {
|
||||
services = append(services, scheduler.Run)
|
||||
}
|
||||
|
||||
err = utils.NewServiceRunner(services...).Run(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to run services: %w", err)
|
||||
}
|
||||
|
||||
// Invoke all shutdown functions
|
||||
// We give these a timeout of 5s
|
||||
// Note: we use a background context because the run context has been canceled already
|
||||
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer shutdownCancel()
|
||||
err = utils.
|
||||
NewServiceRunner(shutdownFns...).
|
||||
Run(shutdownCtx) //nolint:contextcheck
|
||||
if err != nil {
|
||||
slog.Error("Error shutting down services", slog.Any("error", err))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func InitStorage(ctx context.Context, db *gorm.DB) (fileStorage storage.FileStorage, err error) {
	switch common.EnvConfig.FileBackend {
	case storage.TypeFileSystem:
		fileStorage, err = storage.NewFilesystemStorage(common.EnvConfig.UploadPath)
	case storage.TypeDatabase:
		fileStorage, err = storage.NewDatabaseStorage(db)
	case storage.TypeS3:
		s3Cfg := storage.S3Config{
			Bucket:                        common.EnvConfig.S3Bucket,
			Region:                        common.EnvConfig.S3Region,
			Endpoint:                      common.EnvConfig.S3Endpoint,
			AccessKeyID:                   common.EnvConfig.S3AccessKeyID,
			SecretAccessKey:               common.EnvConfig.S3SecretAccessKey,
			ForcePathStyle:                common.EnvConfig.S3ForcePathStyle,
			DisableDefaultIntegrityChecks: common.EnvConfig.S3DisableDefaultIntegrityChecks,
			Root:                          common.EnvConfig.UploadPath,
		}
		fileStorage, err = storage.NewS3Storage(ctx, s3Cfg)
	default:
		err = fmt.Errorf("unknown file storage backend: %s", common.EnvConfig.FileBackend)
	}
	if err != nil {
		return fileStorage, err
	}

	return fileStorage, nil
}
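InitStorage is exported so that out-of-process tooling can reuse the same backend selection as the server; the new export and import commands further down do exactly this. A minimal sketch of such a caller (error handling shortened):

db, err := bootstrap.NewDatabase()
if err != nil {
	return err
}
fileStorage, err := bootstrap.InitStorage(ctx, db)
if err != nil {
	return err
}
// fileStorage is now backed by the filesystem, the database, or S3,
// depending on FILE_BACKEND and the related environment variables.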
|
||||
@@ -12,12 +12,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/glebarez/sqlite"
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
"github.com/golang-migrate/migrate/v4/database"
|
||||
postgresMigrate "github.com/golang-migrate/migrate/v4/database/postgres"
|
||||
sqliteMigrate "github.com/golang-migrate/migrate/v4/database/sqlite3"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/github"
|
||||
"github.com/golang-migrate/migrate/v4/source/iofs"
|
||||
slogGorm "github.com/orandin/slog-gorm"
|
||||
"gorm.io/driver/postgres"
|
||||
"gorm.io/gorm"
|
||||
@@ -26,11 +21,10 @@ import (
|
||||
"github.com/pocket-id/pocket-id/backend/internal/common"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/utils"
|
||||
sqliteutil "github.com/pocket-id/pocket-id/backend/internal/utils/sqlite"
|
||||
"github.com/pocket-id/pocket-id/backend/resources"
|
||||
)
|
||||
|
||||
func NewDatabase() (db *gorm.DB, err error) {
|
||||
db, err = connectDatabase()
|
||||
db, err = ConnectDatabase()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to database: %w", err)
|
||||
}
|
||||
@@ -39,105 +33,15 @@ func NewDatabase() (db *gorm.DB, err error) {
|
||||
return nil, fmt.Errorf("failed to get sql.DB: %w", err)
|
||||
}
|
||||
|
||||
// Choose the correct driver for the database provider
|
||||
var driver database.Driver
|
||||
switch common.EnvConfig.DbProvider {
|
||||
case common.DbProviderSqlite:
|
||||
driver, err = sqliteMigrate.WithInstance(sqlDb, &sqliteMigrate.Config{
|
||||
NoTxWrap: true,
|
||||
})
|
||||
case common.DbProviderPostgres:
|
||||
driver, err = postgresMigrate.WithInstance(sqlDb, &postgresMigrate.Config{})
|
||||
default:
|
||||
// Should never happen at this point
|
||||
return nil, fmt.Errorf("unsupported database provider: %s", common.EnvConfig.DbProvider)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create migration driver: %w", err)
|
||||
}
|
||||
|
||||
// Run migrations
|
||||
if err := migrateDatabase(driver); err != nil {
|
||||
if err := utils.MigrateDatabase(sqlDb); err != nil {
|
||||
return nil, fmt.Errorf("failed to run migrations: %w", err)
|
||||
}
|
||||
|
||||
return db, nil
|
||||
}
|
||||
|
||||
func migrateDatabase(driver database.Driver) error {
|
||||
// Embedded migrations via iofs
|
||||
path := "migrations/" + string(common.EnvConfig.DbProvider)
|
||||
source, err := iofs.New(resources.FS, path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create embedded migration source: %w", err)
|
||||
}
|
||||
|
||||
m, err := migrate.NewWithInstance("iofs", source, "pocket-id", driver)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create migration instance: %w", err)
|
||||
}
|
||||
|
||||
requiredVersion, err := getRequiredMigrationVersion(path)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get last migration version: %w", err)
|
||||
}
|
||||
|
||||
currentVersion, _, _ := m.Version()
|
||||
if currentVersion > requiredVersion {
|
||||
slog.Warn("Database version is newer than the application supports, possible downgrade detected", slog.Uint64("db_version", uint64(currentVersion)), slog.Uint64("app_version", uint64(requiredVersion)))
|
||||
if !common.EnvConfig.AllowDowngrade {
|
||||
return fmt.Errorf("database version (%d) is newer than application version (%d), downgrades are not allowed (set ALLOW_DOWNGRADE=true to enable)", currentVersion, requiredVersion)
|
||||
}
|
||||
slog.Info("Fetching migrations from GitHub to handle possible downgrades")
|
||||
return migrateDatabaseFromGitHub(driver, requiredVersion)
|
||||
}
|
||||
|
||||
if err := m.Migrate(requiredVersion); err != nil && !errors.Is(err, migrate.ErrNoChange) {
|
||||
return fmt.Errorf("failed to apply embedded migrations: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func migrateDatabaseFromGitHub(driver database.Driver, version uint) error {
|
||||
srcURL := "github://pocket-id/pocket-id/backend/resources/migrations/" + string(common.EnvConfig.DbProvider)
|
||||
|
||||
m, err := migrate.NewWithDatabaseInstance(srcURL, "pocket-id", driver)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create GitHub migration instance: %w", err)
|
||||
}
|
||||
|
||||
if err := m.Migrate(version); err != nil && !errors.Is(err, migrate.ErrNoChange) {
|
||||
return fmt.Errorf("failed to apply GitHub migrations: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getRequiredMigrationVersion reads the embedded migration files and returns the highest version number found.
|
||||
func getRequiredMigrationVersion(path string) (uint, error) {
|
||||
entries, err := resources.FS.ReadDir(path)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to read migration directory: %w", err)
|
||||
}
|
||||
|
||||
var maxVersion uint
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
name := entry.Name()
|
||||
var version uint
|
||||
n, err := fmt.Sscanf(name, "%d_", &version)
|
||||
if err == nil && n == 1 {
|
||||
if version > maxVersion {
|
||||
maxVersion = version
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return maxVersion, nil
|
||||
}
|
||||
|
||||
func connectDatabase() (db *gorm.DB, err error) {
|
||||
func ConnectDatabase() (db *gorm.DB, err error) {
|
||||
var dialector gorm.Dialector
|
||||
|
||||
// Choose the correct database provider
|
||||
@@ -155,6 +59,12 @@ func connectDatabase() (db *gorm.DB, err error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !isMemoryDB {
|
||||
if err := ensureSqliteDatabaseDir(dbPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Before we connect, also make sure that there's a temporary folder for SQLite to write its data
|
||||
err = ensureSqliteTempDir(filepath.Dir(dbPath))
|
||||
if err != nil {
|
||||
@@ -388,6 +298,27 @@ func isSqliteInMemory(connString string) bool {
|
||||
return len(qs["mode"]) > 0 && qs["mode"][0] == "memory"
|
||||
}
|
||||
|
||||
// ensureSqliteDatabaseDir creates the parent directory for the SQLite database file if it doesn't exist yet
|
||||
func ensureSqliteDatabaseDir(dbPath string) error {
|
||||
dir := filepath.Dir(dbPath)
|
||||
|
||||
info, err := os.Stat(dir)
|
||||
switch {
|
||||
case err == nil:
|
||||
if !info.IsDir() {
|
||||
return fmt.Errorf("SQLite database directory '%s' is not a directory", dir)
|
||||
}
|
||||
return nil
|
||||
case os.IsNotExist(err):
|
||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||
return fmt.Errorf("failed to create SQLite database directory '%s': %w", dir, err)
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("failed to check SQLite database directory '%s': %w", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
// ensureSqliteTempDir ensures that SQLite has a directory where it can write temporary files if needed
|
||||
// The default directory may not be writable when using a container with a read-only root file system
|
||||
// See: https://www.sqlite.org/tempfiles.html
|
||||
|
||||
@@ -2,6 +2,8 @@ package bootstrap
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -84,6 +86,29 @@ func TestIsSqliteInMemory(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureSqliteDatabaseDir(t *testing.T) {
|
||||
t.Run("creates missing directory", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
dbPath := filepath.Join(tempDir, "nested", "pocket-id.db")
|
||||
|
||||
err := ensureSqliteDatabaseDir(dbPath)
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := os.Stat(filepath.Dir(dbPath))
|
||||
require.NoError(t, err)
|
||||
assert.True(t, info.IsDir())
|
||||
})
|
||||
|
||||
t.Run("fails when parent is file", func(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
filePath := filepath.Join(tempDir, "file.txt")
|
||||
require.NoError(t, os.WriteFile(filePath, []byte("test"), 0o600))
|
||||
|
||||
err := ensureSqliteDatabaseDir(filepath.Join(filePath, "data.db"))
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestConvertSqlitePragmaArgs(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
@@ -17,7 +17,7 @@ import (
|
||||
func init() {
|
||||
registerTestControllers = []func(apiGroup *gin.RouterGroup, db *gorm.DB, svc *services){
|
||||
func(apiGroup *gin.RouterGroup, db *gorm.DB, svc *services) {
|
||||
testService, err := service.NewTestService(db, svc.appConfigService, svc.jwtService, svc.ldapService)
|
||||
testService, err := service.NewTestService(db, svc.appConfigService, svc.jwtService, svc.ldapService, svc.appLockService, svc.fileStorage)
|
||||
if err != nil {
|
||||
slog.Error("Failed to initialize test service", slog.Any("error", err))
|
||||
os.Exit(1)
|
||||
|
||||
@@ -29,23 +29,14 @@ import (
|
||||
// This is used to register additional controllers for tests
|
||||
var registerTestControllers []func(apiGroup *gin.RouterGroup, db *gorm.DB, svc *services)
|
||||
|
||||
func initRouter(db *gorm.DB, svc *services) utils.Service {
|
||||
runner, err := initRouterInternal(db, svc)
|
||||
if err != nil {
|
||||
slog.Error("Failed to init router", "error", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
return runner
|
||||
}
|
||||
|
||||
func initRouterInternal(db *gorm.DB, svc *services) (utils.Service, error) {
|
||||
func initRouter(db *gorm.DB, svc *services) (utils.Service, error) {
|
||||
// Set the appropriate Gin mode based on the environment
|
||||
switch common.EnvConfig.AppEnv {
|
||||
case "production":
|
||||
case common.AppEnvProduction:
|
||||
gin.SetMode(gin.ReleaseMode)
|
||||
case "development":
|
||||
case common.AppEnvDevelopment:
|
||||
gin.SetMode(gin.DebugMode)
|
||||
case "test":
|
||||
case common.AppEnvTest:
|
||||
gin.SetMode(gin.TestMode)
|
||||
}
|
||||
|
||||
@@ -63,6 +54,8 @@ func initRouterInternal(db *gorm.DB, svc *services) (utils.Service, error) {
|
||||
rateLimitMiddleware := middleware.NewRateLimitMiddleware().Add(rate.Every(time.Second), 60)
|
||||
|
||||
// Setup global middleware
|
||||
r.Use(middleware.HeadMiddleware())
|
||||
r.Use(middleware.NewCacheControlMiddleware().Add())
|
||||
r.Use(middleware.NewCorsMiddleware().Add())
|
||||
r.Use(middleware.NewCspMiddleware().Add())
|
||||
r.Use(middleware.NewErrorHandlerMiddleware().Add())
|
||||
@@ -90,9 +83,10 @@ func initRouterInternal(db *gorm.DB, svc *services) (utils.Service, error) {
|
||||
controller.NewUserGroupController(apiGroup, authMiddleware, svc.userGroupService)
|
||||
controller.NewCustomClaimController(apiGroup, authMiddleware, svc.customClaimService)
|
||||
controller.NewVersionController(apiGroup, svc.versionService)
|
||||
controller.NewScimController(apiGroup, authMiddleware, svc.scimService)
|
||||
|
||||
// Add test controller in non-production environments
|
||||
if common.EnvConfig.AppEnv != "production" {
|
||||
if !common.EnvConfig.AppEnv.IsProduction() {
|
||||
for _, f := range registerTestControllers {
|
||||
f(apiGroup, db, svc)
|
||||
}
|
||||
@@ -110,7 +104,17 @@ func initRouterInternal(db *gorm.DB, svc *services) (utils.Service, error) {
srv := &http.Server{
MaxHeaderBytes: 1 << 20,
ReadHeaderTimeout: 10 * time.Second,
Handler: r,
Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
// HEAD requests don't get matched by Gin routes, so we convert them to GET
// middleware.HeadMiddleware will convert them back to HEAD later
if req.Method == http.MethodHead {
req.Method = http.MethodGet
ctx := context.WithValue(req.Context(), middleware.IsHeadRequestCtxKey{}, true)
req = req.WithContext(ctx)
}

r.ServeHTTP(w, req)
}),
}
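Aside: a self-contained illustration (not from the repository) of the same rewrite-and-flag trick using only the standard library. The key type isHeadKey stands in for middleware.IsHeadRequestCtxKey; in the real server, Gin plus HeadMiddleware sit where the plain mux is used here.

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

type isHeadKey struct{}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) {
		// By the time the handler runs, the method has been rewritten to GET and
		// the context flag records that the client actually sent HEAD.
		fmt.Fprintf(w, "method=%s head=%v", r.Method, r.Context().Value(isHeadKey{}) != nil)
	})

	// Same shape as the http.HandlerFunc wrapper in the diff above.
	wrapped := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == http.MethodHead {
			r.Method = http.MethodGet
			r = r.WithContext(context.WithValue(r.Context(), isHeadKey{}, true))
		}
		mux.ServeHTTP(w, r)
	})

	req := httptest.NewRequest(http.MethodHead, "/ping", nil)
	rec := httptest.NewRecorder()
	wrapped.ServeHTTP(rec, req)
	fmt.Println(rec.Code, rec.Body.String()) // 200 method=GET head=true
}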
|
||||
// Set up the listener
|
||||
@@ -186,6 +190,7 @@ func initLogger(r *gin.Engine) {
|
||||
"GET /api/application-images/logo",
|
||||
"GET /api/application-images/background",
|
||||
"GET /api/application-images/favicon",
|
||||
"GET /api/application-images/email",
|
||||
"GET /_app",
|
||||
"GET /fonts",
|
||||
"GET /healthz",
|
||||
|
||||
@@ -23,7 +23,7 @@ func registerScheduledJobs(ctx context.Context, db *gorm.DB, svc *services, http
if err != nil {
return fmt.Errorf("failed to register DB cleanup jobs in scheduler: %w", err)
}
err = scheduler.RegisterFileCleanupJobs(ctx, db)
err = scheduler.RegisterFileCleanupJobs(ctx, db, svc.fileStorage)
if err != nil {
return fmt.Errorf("failed to register file cleanup jobs in scheduler: %w", err)
}
@@ -35,6 +35,10 @@ func registerScheduledJobs(ctx context.Context, db *gorm.DB, svc *services, http
if err != nil {
return fmt.Errorf("failed to register analytics job in scheduler: %w", err)
}
err = scheduler.RegisterScimJobs(ctx, svc.scimService)
if err != nil {
return fmt.Errorf("failed to register SCIM scheduler job: %w", err)
}

return nil
}
@@ -5,9 +5,11 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/pocket-id/pocket-id/backend/internal/job"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/pocket-id/pocket-id/backend/internal/service"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/storage"
|
||||
)
|
||||
|
||||
type services struct {
|
||||
@@ -18,6 +20,7 @@ type services struct {
|
||||
auditLogService *service.AuditLogService
|
||||
jwtService *service.JwtService
|
||||
webauthnService *service.WebAuthnService
|
||||
scimService *service.ScimService
|
||||
userService *service.UserService
|
||||
customClaimService *service.CustomClaimService
|
||||
oidcService *service.OidcService
|
||||
@@ -25,10 +28,12 @@ type services struct {
|
||||
ldapService *service.LdapService
|
||||
apiKeyService *service.ApiKeyService
|
||||
versionService *service.VersionService
|
||||
fileStorage storage.FileStorage
|
||||
appLockService *service.AppLockService
|
||||
}
|
||||
|
||||
// Initializes all services
|
||||
func initServices(ctx context.Context, db *gorm.DB, httpClient *http.Client, imageExtensions map[string]string) (svc *services, err error) {
|
||||
func initServices(ctx context.Context, db *gorm.DB, httpClient *http.Client, imageExtensions map[string]string, fileStorage storage.FileStorage, scheduler *job.Scheduler) (svc *services, err error) {
|
||||
svc = &services{}
|
||||
|
||||
svc.appConfigService, err = service.NewAppConfigService(ctx, db)
|
||||
@@ -36,7 +41,9 @@ func initServices(ctx context.Context, db *gorm.DB, httpClient *http.Client, ima
|
||||
return nil, fmt.Errorf("failed to create app config service: %w", err)
|
||||
}
|
||||
|
||||
svc.appImagesService = service.NewAppImagesService(imageExtensions)
|
||||
svc.fileStorage = fileStorage
|
||||
svc.appImagesService = service.NewAppImagesService(imageExtensions, fileStorage)
|
||||
svc.appLockService = service.NewAppLockService(db)
|
||||
|
||||
svc.emailService, err = service.NewEmailService(db, svc.appConfigService)
|
||||
if err != nil {
|
||||
@@ -56,14 +63,16 @@ func initServices(ctx context.Context, db *gorm.DB, httpClient *http.Client, ima
|
||||
return nil, fmt.Errorf("failed to create WebAuthn service: %w", err)
|
||||
}
|
||||
|
||||
svc.oidcService, err = service.NewOidcService(ctx, db, svc.jwtService, svc.appConfigService, svc.auditLogService, svc.customClaimService, svc.webauthnService, httpClient)
|
||||
svc.scimService = service.NewScimService(db, scheduler, httpClient)
|
||||
|
||||
svc.oidcService, err = service.NewOidcService(ctx, db, svc.jwtService, svc.appConfigService, svc.auditLogService, svc.customClaimService, svc.webauthnService, svc.scimService, httpClient, fileStorage)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create OIDC service: %w", err)
|
||||
}
|
||||
|
||||
svc.userGroupService = service.NewUserGroupService(db, svc.appConfigService)
|
||||
svc.userService = service.NewUserService(db, svc.jwtService, svc.auditLogService, svc.emailService, svc.appConfigService, svc.customClaimService)
|
||||
svc.ldapService = service.NewLdapService(db, httpClient, svc.appConfigService, svc.userService, svc.userGroupService)
|
||||
svc.userGroupService = service.NewUserGroupService(db, svc.appConfigService, svc.scimService)
|
||||
svc.userService = service.NewUserService(db, svc.jwtService, svc.auditLogService, svc.emailService, svc.appConfigService, svc.customClaimService, svc.appImagesService, svc.scimService, fileStorage)
|
||||
svc.ldapService = service.NewLdapService(db, httpClient, svc.appConfigService, svc.userService, svc.userGroupService, fileStorage)
|
||||
svc.apiKeyService = service.NewApiKeyService(db, svc.emailService)
|
||||
|
||||
svc.versionService = service.NewVersionService(httpClient)
|
||||
|
||||
backend/internal/cmds/export.go (new file, 70 lines)
@@ -0,0 +1,70 @@
package cmds

import (
	"context"
	"fmt"
	"io"
	"os"

	"github.com/pocket-id/pocket-id/backend/internal/bootstrap"
	"github.com/pocket-id/pocket-id/backend/internal/service"
	"github.com/spf13/cobra"
)

type exportFlags struct {
	Path string
}

func init() {
	var flags exportFlags

	exportCmd := &cobra.Command{
		Use:   "export",
		Short: "Exports all data of Pocket ID into a ZIP file",
		RunE: func(cmd *cobra.Command, args []string) error {
			return runExport(cmd.Context(), flags)
		},
	}

	exportCmd.Flags().StringVarP(&flags.Path, "path", "p", "pocket-id-export.zip", "Path to the ZIP file to export the data to, or '-' to write to stdout")

	rootCmd.AddCommand(exportCmd)
}

// runExport orchestrates the export flow
func runExport(ctx context.Context, flags exportFlags) error {
	db, err := bootstrap.NewDatabase()
	if err != nil {
		return fmt.Errorf("failed to connect to database: %w", err)
	}

	storage, err := bootstrap.InitStorage(ctx, db)
	if err != nil {
		return fmt.Errorf("failed to initialize storage: %w", err)
	}

	exportService := service.NewExportService(db, storage)

	var w io.Writer
	if flags.Path == "-" {
		w = os.Stdout
	} else {
		file, err := os.Create(flags.Path)
		if err != nil {
			return fmt.Errorf("failed to create export file: %w", err)
		}
		defer file.Close()

		w = file
	}

	if err := exportService.ExportToZip(ctx, w); err != nil {
		return fmt.Errorf("failed to export data: %w", err)
	}

	if flags.Path != "-" {
		fmt.Printf("Exported data to %s\n", flags.Path)
	}

	return nil
}
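With this command, running "pocket-id export" writes pocket-id-export.zip to the working directory by default; --path selects a different destination file, and --path - streams the ZIP archive to stdout instead.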
backend/internal/cmds/import.go (new file, 191 lines)
@@ -0,0 +1,191 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/pocket-id/pocket-id/backend/internal/bootstrap"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/common"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/service"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/utils"
|
||||
)
|
||||
|
||||
type importFlags struct {
|
||||
Path string
|
||||
Yes bool
|
||||
ForcefullyAcquireLock bool
|
||||
}
|
||||
|
||||
func init() {
|
||||
var flags importFlags
|
||||
|
||||
importCmd := &cobra.Command{
|
||||
Use: "import",
|
||||
Short: "Imports all data of Pocket ID from a ZIP file",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runImport(cmd.Context(), flags)
|
||||
},
|
||||
}
|
||||
|
||||
importCmd.Flags().StringVarP(&flags.Path, "path", "p", "pocket-id-export.zip", "Path to the ZIP file to import the data from, or '-' to read from stdin")
|
||||
importCmd.Flags().BoolVarP(&flags.Yes, "yes", "y", false, "Skip confirmation prompts")
|
||||
importCmd.Flags().BoolVarP(&flags.ForcefullyAcquireLock, "forcefully-acquire-lock", "", false, "Forcefully acquire the application lock by terminating the Pocket ID instance")
|
||||
|
||||
rootCmd.AddCommand(importCmd)
|
||||
}
|
||||
|
||||
// runImport handles the high-level orchestration of the import process
|
||||
func runImport(ctx context.Context, flags importFlags) error {
|
||||
if !flags.Yes {
|
||||
ok, err := askForConfirmation()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get confirmation: %w", err)
|
||||
}
|
||||
if !ok {
|
||||
fmt.Println("Aborted")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
zipReader *zip.ReadCloser
|
||||
cleanup func()
|
||||
err error
|
||||
)
|
||||
|
||||
if flags.Path == "-" {
|
||||
zipReader, cleanup, err = readZipFromStdin()
|
||||
defer cleanup()
|
||||
} else {
|
||||
zipReader, err = zip.OpenReader(flags.Path)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open zip: %w", err)
|
||||
}
|
||||
defer zipReader.Close()
|
||||
|
||||
db, err := bootstrap.ConnectDatabase()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = acquireImportLock(ctx, db, flags.ForcefullyAcquireLock)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
storage, err := bootstrap.InitStorage(ctx, db)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize storage: %w", err)
|
||||
}
|
||||
|
||||
importService := service.NewImportService(db, storage)
|
||||
err = importService.ImportFromZip(ctx, &zipReader.Reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to import data from zip: %w", err)
|
||||
}
|
||||
|
||||
fmt.Println("Import completed successfully.")
|
||||
return nil
|
||||
}
|
||||
|
||||
func acquireImportLock(ctx context.Context, db *gorm.DB, force bool) error {
|
||||
// Check if the kv table exists, in case we are starting from an empty database
|
||||
exists, err := utils.DBTableExists(db, "kv")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check if kv table exists: %w", err)
|
||||
}
|
||||
if !exists {
|
||||
// This either means the database is empty, or the import is into an old version of PocketID that doesn't support locks
|
||||
// In either case, there's no lock to acquire
|
||||
fmt.Println("Could not acquire a lock because the 'kv' table does not exist. This is fine if you're importing into a new database, but make sure that there isn't an instance of Pocket ID currently running and using the same database.")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Note that we do not call a deferred Release if the data was imported
|
||||
// This is because we are overriding the contents of the database, so the lock is automatically lost
|
||||
appLockService := service.NewAppLockService(db)
|
||||
|
||||
opCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
waitUntil, err := appLockService.Acquire(opCtx, force)
|
||||
if err != nil {
|
||||
if errors.Is(err, service.ErrLockUnavailable) {
|
||||
//nolint:staticcheck
|
||||
return errors.New("Pocket ID must be stopped before importing data; please stop the running instance or run with --forcefully-acquire-lock to terminate the other instance")
|
||||
}
|
||||
return fmt.Errorf("failed to acquire application lock: %w", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-time.After(time.Until(waitUntil)):
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func askForConfirmation() (bool, error) {
|
||||
fmt.Println("WARNING: This feature is experimental and may not work correctly. Please create a backup before proceeding and report any issues you encounter.")
|
||||
fmt.Println()
|
||||
fmt.Println("WARNING: Import will erase all existing data at the following locations:")
|
||||
fmt.Printf("Database: %s\n", absolutePathOrOriginal(common.EnvConfig.DbConnectionString))
|
||||
fmt.Printf("Uploads Path: %s\n", absolutePathOrOriginal(common.EnvConfig.UploadPath))
|
||||
|
||||
ok, err := utils.PromptForConfirmation("Do you want to continue?")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
// absolutePathOrOriginal returns the absolute path of the given path, or the original if it fails
|
||||
func absolutePathOrOriginal(path string) string {
|
||||
abs, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return path
|
||||
}
|
||||
return abs
|
||||
}
|
||||
|
||||
func readZipFromStdin() (*zip.ReadCloser, func(), error) {
|
||||
tmpFile, err := os.CreateTemp("", "pocket-id-import-*.zip")
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to create temporary file: %w", err)
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
_ = os.Remove(tmpFile.Name())
|
||||
}
|
||||
|
||||
if _, err := io.Copy(tmpFile, os.Stdin); err != nil {
|
||||
tmpFile.Close()
|
||||
cleanup()
|
||||
return nil, nil, fmt.Errorf("failed to read data from stdin: %w", err)
|
||||
}
|
||||
|
||||
if err := tmpFile.Close(); err != nil {
|
||||
cleanup()
|
||||
return nil, nil, fmt.Errorf("failed to close temporary file: %w", err)
|
||||
}
|
||||
|
||||
r, err := zip.OpenReader(tmpFile.Name())
|
||||
if err != nil {
|
||||
cleanup()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return r, cleanup, nil
|
||||
}
|
||||
@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"os"
"strings"

"github.com/lestrrat-go/jwx/v3/jwa"
@@ -78,7 +79,7 @@ func keyRotate(ctx context.Context, flags keyRotateFlags, db *gorm.DB, envConfig
}
if !ok {
fmt.Println("Aborted")
return nil
os.Exit(1)
}
}

@@ -1,8 +1,6 @@
|
||||
package cmds
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -69,78 +67,14 @@ func TestKeyRotate(t *testing.T) {
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Run("file storage", func(t *testing.T) {
|
||||
testKeyRotateWithFileStorage(t, tt.flags, tt.wantErr, tt.errMsg)
|
||||
})
|
||||
|
||||
t.Run("database storage", func(t *testing.T) {
|
||||
testKeyRotateWithDatabaseStorage(t, tt.flags, tt.wantErr, tt.errMsg)
|
||||
})
|
||||
testKeyRotateWithDatabaseStorage(t, tt.flags, tt.wantErr, tt.errMsg)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testKeyRotateWithFileStorage(t *testing.T, flags keyRotateFlags, wantErr bool, errMsg string) {
|
||||
// Create temporary directory for keys
|
||||
tempDir := t.TempDir()
|
||||
keysPath := filepath.Join(tempDir, "keys")
|
||||
err := os.MkdirAll(keysPath, 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Set up file storage config
|
||||
envConfig := &common.EnvConfigSchema{
|
||||
KeysStorage: "file",
|
||||
KeysPath: keysPath,
|
||||
}
|
||||
|
||||
// Create test database
|
||||
db := testingutils.NewDatabaseForTest(t)
|
||||
|
||||
// Initialize app config service and create instance
|
||||
appConfigService, err := service.NewAppConfigService(t.Context(), db)
|
||||
require.NoError(t, err)
|
||||
instanceID := appConfigService.GetDbConfig().InstanceID.Value
|
||||
|
||||
// Check if key exists before rotation
|
||||
keyProvider, err := jwkutils.GetKeyProvider(db, envConfig, instanceID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Run the key rotation
|
||||
err = keyRotate(t.Context(), flags, db, envConfig)
|
||||
|
||||
if wantErr {
|
||||
require.Error(t, err)
|
||||
if errMsg != "" {
|
||||
require.ErrorContains(t, err, errMsg)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify key was created
|
||||
key, err := keyProvider.LoadKey()
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, key)
|
||||
|
||||
// Verify the algorithm matches what we requested
|
||||
alg, _ := key.Algorithm()
|
||||
assert.NotEmpty(t, alg)
|
||||
if flags.Alg != "" {
|
||||
expectedAlg := flags.Alg
|
||||
if expectedAlg == "EdDSA" {
|
||||
// EdDSA keys should have the EdDSA algorithm
|
||||
assert.Equal(t, "EdDSA", alg.String())
|
||||
} else {
|
||||
assert.Equal(t, expectedAlg, alg.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testKeyRotateWithDatabaseStorage(t *testing.T, flags keyRotateFlags, wantErr bool, errMsg string) {
|
||||
// Set up database storage config
|
||||
envConfig := &common.EnvConfigSchema{
|
||||
KeysStorage: "database",
|
||||
EncryptionKey: []byte("test-encryption-key-characters-long"),
|
||||
}
|
||||
|
||||
|
||||
@@ -51,7 +51,7 @@ var oneTimeAccessTokenCmd = &cobra.Command{
}

// Create a new access token that expires in 1 hour
oneTimeAccessToken, txErr = service.NewOneTimeAccessToken(user.ID, time.Hour)
oneTimeAccessToken, txErr = service.NewOneTimeAccessToken(user.ID, time.Hour, false)
if txErr != nil {
return fmt.Errorf("failed to generate access token: %w", txErr)
}

@@ -12,9 +12,10 @@ import (
)

var rootCmd = &cobra.Command{
Use: "pocket-id",
Short: "A simple and easy-to-use OIDC provider that allows users to authenticate with their passkeys to your services.",
Long: "By default, this command starts the pocket-id server.",
Use: "pocket-id",
Short: "A simple and easy-to-use OIDC provider that allows users to authenticate with their passkeys to your services.",
Long: "By default, this command starts the pocket-id server.",
SilenceUsage: true,
Run: func(cmd *cobra.Command, args []string) {
// Start the server
err := bootstrap.Bootstrap(cmd.Context())

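SilenceUsage: true keeps Cobra from printing the full usage/help text when a command returns an error; the error message itself is still reported.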
@@ -15,6 +15,7 @@ import (
|
||||
_ "github.com/joho/godotenv/autoload"
|
||||
)
|
||||
|
||||
type AppEnv string
|
||||
type DbProvider string
|
||||
|
||||
const (
|
||||
@@ -25,39 +26,54 @@ const (
|
||||
)
|
||||
|
||||
const (
|
||||
AppEnvProduction AppEnv = "production"
|
||||
AppEnvDevelopment AppEnv = "development"
|
||||
AppEnvTest AppEnv = "test"
|
||||
DbProviderSqlite DbProvider = "sqlite"
|
||||
DbProviderPostgres DbProvider = "postgres"
|
||||
MaxMindGeoLiteCityUrl string = "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key=%s&suffix=tar.gz"
|
||||
defaultSqliteConnString string = "data/pocket-id.db"
|
||||
defaultFsUploadPath string = "data/uploads"
|
||||
AppUrl string = "http://localhost:1411"
|
||||
)
|
||||
|
||||
type EnvConfigSchema struct {
|
||||
AppEnv string `env:"APP_ENV" options:"toLower"`
|
||||
LogLevel string `env:"LOG_LEVEL" options:"toLower"`
|
||||
AppURL string `env:"APP_URL" options:"toLower"`
|
||||
DbProvider DbProvider `env:"DB_PROVIDER" options:"toLower"`
|
||||
DbConnectionString string `env:"DB_CONNECTION_STRING" options:"file"`
|
||||
UploadPath string `env:"UPLOAD_PATH"`
|
||||
KeysPath string `env:"KEYS_PATH"`
|
||||
KeysStorage string `env:"KEYS_STORAGE"`
|
||||
EncryptionKey []byte `env:"ENCRYPTION_KEY" options:"file"`
|
||||
Port string `env:"PORT"`
|
||||
Host string `env:"HOST" options:"toLower"`
|
||||
UnixSocket string `env:"UNIX_SOCKET"`
|
||||
UnixSocketMode string `env:"UNIX_SOCKET_MODE"`
|
||||
MaxMindLicenseKey string `env:"MAXMIND_LICENSE_KEY" options:"file"`
|
||||
GeoLiteDBPath string `env:"GEOLITE_DB_PATH"`
|
||||
GeoLiteDBUrl string `env:"GEOLITE_DB_URL"`
|
||||
LocalIPv6Ranges string `env:"LOCAL_IPV6_RANGES"`
|
||||
UiConfigDisabled bool `env:"UI_CONFIG_DISABLED"`
|
||||
MetricsEnabled bool `env:"METRICS_ENABLED"`
|
||||
TracingEnabled bool `env:"TRACING_ENABLED"`
|
||||
LogJSON bool `env:"LOG_JSON"`
|
||||
TrustProxy bool `env:"TRUST_PROXY"`
|
||||
AnalyticsDisabled bool `env:"ANALYTICS_DISABLED"`
|
||||
AllowDowngrade bool `env:"ALLOW_DOWNGRADE"`
|
||||
InternalAppURL string `env:"INTERNAL_APP_URL"`
|
||||
AppEnv AppEnv `env:"APP_ENV" options:"toLower"`
|
||||
EncryptionKey []byte `env:"ENCRYPTION_KEY" options:"file"`
|
||||
AppURL string `env:"APP_URL" options:"toLower,trimTrailingSlash"`
|
||||
DbProvider DbProvider
|
||||
DbConnectionString string `env:"DB_CONNECTION_STRING" options:"file"`
|
||||
TrustProxy bool `env:"TRUST_PROXY"`
|
||||
AuditLogRetentionDays int `env:"AUDIT_LOG_RETENTION_DAYS"`
|
||||
AnalyticsDisabled bool `env:"ANALYTICS_DISABLED"`
|
||||
AllowDowngrade bool `env:"ALLOW_DOWNGRADE"`
|
||||
InternalAppURL string `env:"INTERNAL_APP_URL"`
|
||||
UiConfigDisabled bool `env:"UI_CONFIG_DISABLED"`
|
||||
|
||||
FileBackend string `env:"FILE_BACKEND" options:"toLower"`
|
||||
UploadPath string `env:"UPLOAD_PATH"`
|
||||
S3Bucket string `env:"S3_BUCKET"`
|
||||
S3Region string `env:"S3_REGION"`
|
||||
S3Endpoint string `env:"S3_ENDPOINT"`
|
||||
S3AccessKeyID string `env:"S3_ACCESS_KEY_ID"`
|
||||
S3SecretAccessKey string `env:"S3_SECRET_ACCESS_KEY"`
|
||||
S3ForcePathStyle bool `env:"S3_FORCE_PATH_STYLE"`
|
||||
S3DisableDefaultIntegrityChecks bool `env:"S3_DISABLE_DEFAULT_INTEGRITY_CHECKS"`
|
||||
|
||||
Port string `env:"PORT"`
|
||||
Host string `env:"HOST" options:"toLower"`
|
||||
UnixSocket string `env:"UNIX_SOCKET"`
|
||||
UnixSocketMode string `env:"UNIX_SOCKET_MODE"`
|
||||
LocalIPv6Ranges string `env:"LOCAL_IPV6_RANGES"`
|
||||
|
||||
MaxMindLicenseKey string `env:"MAXMIND_LICENSE_KEY" options:"file"`
|
||||
GeoLiteDBPath string `env:"GEOLITE_DB_PATH"`
|
||||
GeoLiteDBUrl string `env:"GEOLITE_DB_URL"`
|
||||
|
||||
LogLevel string `env:"LOG_LEVEL" options:"toLower"`
|
||||
MetricsEnabled bool `env:"METRICS_ENABLED"`
|
||||
TracingEnabled bool `env:"TRACING_ENABLED"`
|
||||
LogJSON bool `env:"LOG_JSON"`
|
||||
}
|
||||
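For illustration, an S3-backed deployment would set FILE_BACKEND=s3 together with S3_BUCKET and S3_REGION, plus S3_ACCESS_KEY_ID and S3_SECRET_ACCESS_KEY for credentials; S3_ENDPOINT and S3_FORCE_PATH_STYLE=true are typically only needed for non-AWS, S3-compatible object stores. (Variable names taken from the struct tags above; which combinations are required is not spelled out in this diff.)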
|
||||
var EnvConfig = defaultConfig()
|
||||
@@ -72,30 +88,16 @@ func init() {
|
||||
|
||||
func defaultConfig() EnvConfigSchema {
|
||||
return EnvConfigSchema{
|
||||
AppEnv: "production",
|
||||
LogLevel: "info",
|
||||
DbProvider: "sqlite",
|
||||
DbConnectionString: "",
|
||||
UploadPath: "data/uploads",
|
||||
KeysPath: "data/keys",
|
||||
KeysStorage: "", // "database" or "file"
|
||||
EncryptionKey: nil,
|
||||
AppURL: AppUrl,
|
||||
Port: "1411",
|
||||
Host: "0.0.0.0",
|
||||
UnixSocket: "",
|
||||
UnixSocketMode: "",
|
||||
MaxMindLicenseKey: "",
|
||||
GeoLiteDBPath: "data/GeoLite2-City.mmdb",
|
||||
GeoLiteDBUrl: MaxMindGeoLiteCityUrl,
|
||||
LocalIPv6Ranges: "",
|
||||
UiConfigDisabled: false,
|
||||
MetricsEnabled: false,
|
||||
TracingEnabled: false,
|
||||
TrustProxy: false,
|
||||
AnalyticsDisabled: false,
|
||||
AllowDowngrade: false,
|
||||
InternalAppURL: "",
|
||||
AppEnv: AppEnvProduction,
|
||||
LogLevel: "info",
|
||||
DbProvider: "sqlite",
|
||||
FileBackend: "filesystem",
|
||||
AuditLogRetentionDays: 90,
|
||||
AppURL: AppUrl,
|
||||
Port: "1411",
|
||||
Host: "0.0.0.0",
|
||||
GeoLiteDBPath: "data/GeoLite2-City.mmdb",
|
||||
GeoLiteDBUrl: MaxMindGeoLiteCityUrl,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -118,32 +120,28 @@ func parseEnvConfig() error {
|
||||
return fmt.Errorf("error preparing env config: %w", err)
|
||||
}
|
||||
|
||||
err = validateEnvConfig(&EnvConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// validateEnvConfig checks the EnvConfig for required fields and valid values
|
||||
func validateEnvConfig(config *EnvConfigSchema) error {
|
||||
// ValidateEnvConfig checks the EnvConfig for required fields and valid values
|
||||
func ValidateEnvConfig(config *EnvConfigSchema) error {
|
||||
if _, err := sloggin.ParseLevel(config.LogLevel); err != nil {
|
||||
return errors.New("invalid LOG_LEVEL value. Must be 'debug', 'info', 'warn' or 'error'")
|
||||
}
|
||||
|
||||
switch config.DbProvider {
|
||||
case DbProviderSqlite:
|
||||
if config.DbConnectionString == "" {
|
||||
config.DbConnectionString = defaultSqliteConnString
|
||||
}
|
||||
case DbProviderPostgres:
|
||||
if config.DbConnectionString == "" {
|
||||
return errors.New("missing required env var 'DB_CONNECTION_STRING' for Postgres database")
|
||||
}
|
||||
if len(config.EncryptionKey) < 16 {
|
||||
return errors.New("ENCRYPTION_KEY must be at least 16 bytes long")
|
||||
}
|
||||
|
||||
switch {
|
||||
case config.DbConnectionString == "":
|
||||
config.DbProvider = DbProviderSqlite
|
||||
config.DbConnectionString = defaultSqliteConnString
|
||||
case strings.HasPrefix(config.DbConnectionString, "postgres://") || strings.HasPrefix(config.DbConnectionString, "postgresql://"):
|
||||
config.DbProvider = DbProviderPostgres
|
||||
default:
|
||||
return errors.New("invalid DB_PROVIDER value. Must be 'sqlite' or 'postgres'")
|
||||
config.DbProvider = DbProviderSqlite
|
||||
}
|
||||
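Aside: DB_PROVIDER is no longer read from the environment here; the provider is inferred from DB_CONNECTION_STRING instead. A standalone sketch of the resulting mapping (simplified; the real ValidateEnvConfig also applies the other checks shown above):

package main

import (
	"fmt"
	"strings"
)

// inferProvider mirrors the new selection logic: an empty string falls back to the
// default SQLite database, postgres:// or postgresql:// prefixes select Postgres,
// and anything else is treated as a SQLite connection string.
func inferProvider(connString string) string {
	switch {
	case connString == "":
		return "sqlite (default path data/pocket-id.db)"
	case strings.HasPrefix(connString, "postgres://"), strings.HasPrefix(connString, "postgresql://"):
		return "postgres"
	default:
		return "sqlite"
	}
}

func main() {
	for _, s := range []string{"", "file:test.db", "postgres://user:pass@localhost/db"} {
		fmt.Printf("%-40q -> %s\n", s, inferProvider(s))
	}
}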
|
||||
parsedAppUrl, err := url.Parse(config.AppURL)
|
||||
@@ -167,18 +165,15 @@ func validateEnvConfig(config *EnvConfigSchema) error {
|
||||
}
|
||||
}
|
||||
|
||||
switch config.KeysStorage {
|
||||
// KeysStorage defaults to "file" if empty
|
||||
case "":
|
||||
config.KeysStorage = "file"
|
||||
case "database":
|
||||
if config.EncryptionKey == nil {
|
||||
return errors.New("ENCRYPTION_KEY must be non-empty when KEYS_STORAGE is database")
|
||||
}
|
||||
case "file":
|
||||
switch config.FileBackend {
|
||||
case "s3", "database":
|
||||
// All good, these are valid values
|
||||
case "", "filesystem":
|
||||
if config.UploadPath == "" {
|
||||
config.UploadPath = defaultFsUploadPath
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("invalid value for KEYS_STORAGE: %s", config.KeysStorage)
|
||||
return errors.New("invalid FILE_BACKEND value. Must be 'filesystem', 'database', or 's3'")
|
||||
}
|
||||
|
||||
// Validate LOCAL_IPV6_RANGES
|
||||
@@ -200,6 +195,10 @@ func validateEnvConfig(config *EnvConfigSchema) error {
|
||||
|
||||
}
|
||||
|
||||
if config.AuditLogRetentionDays <= 0 {
|
||||
return errors.New("AUDIT_LOG_RETENTION_DAYS must be greater than 0")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
}
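A rough sketch of the provider detection that the replacement switch above suggests: an empty connection string falls back to the bundled SQLite database, a postgres:// or postgresql:// prefix selects Postgres, and anything else is treated as an SQLite DSN. The helper name detectDbProvider is illustrative only; the constants match the diff.

func detectDbProvider(connString string) (provider string, resolved string) {
	switch {
	case connString == "":
		// No connection string configured: default to SQLite with the default DSN.
		return DbProviderSqlite, defaultSqliteConnString
	case strings.HasPrefix(connString, "postgres://"), strings.HasPrefix(connString, "postgresql://"):
		return DbProviderPostgres, connString
	default:
		// Any other value is assumed to be an SQLite DSN, mirroring the default branch above.
		return DbProviderSqlite, connString
	}
}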
@@ -227,6 +226,10 @@ func prepareEnvConfig(config *EnvConfigSchema) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case "trimTrailingSlash":
|
||||
if field.Kind() == reflect.String {
|
||||
field.SetString(strings.TrimRight(field.String(), "/"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -275,3 +278,11 @@ func resolveFileBasedEnvVariable(field reflect.Value, fieldType reflect.StructFi
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a AppEnv) IsProduction() bool {
|
||||
return a == AppEnvProduction
|
||||
}
|
||||
|
||||
func (a AppEnv) IsTest() bool {
|
||||
return a == AppEnvTest
|
||||
}
|
||||
|
||||
@@ -8,6 +8,20 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func parseAndValidateEnvConfig(t *testing.T) error {
|
||||
t.Helper()
|
||||
|
||||
if _, exists := os.LookupEnv("ENCRYPTION_KEY"); !exists {
|
||||
t.Setenv("ENCRYPTION_KEY", "0123456789abcdef")
|
||||
}
|
||||
|
||||
if err := parseEnvConfig(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return ValidateEnvConfig(&EnvConfig)
|
||||
}
|
||||
|
||||
func TestParseEnvConfig(t *testing.T) {
|
||||
// Store original config to restore later
|
||||
originalConfig := EnvConfig
|
||||
@@ -17,11 +31,10 @@ func TestParseEnvConfig(t *testing.T) {
|
||||
|
||||
t.Run("should parse valid SQLite config correctly", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "SQLITE") // should be lowercased automatically
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("APP_URL", "HTTP://LOCALHOST:3000")
|
||||
|
||||
err := parseEnvConfig()
|
||||
err := parseAndValidateEnvConfig(t)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, DbProviderSqlite, EnvConfig.DbProvider)
|
||||
assert.Equal(t, "http://localhost:3000", EnvConfig.AppURL)
|
||||
@@ -29,147 +42,76 @@ func TestParseEnvConfig(t *testing.T) {
|
||||
|
||||
t.Run("should parse valid Postgres config correctly", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "POSTGRES")
|
||||
t.Setenv("DB_CONNECTION_STRING", "postgres://user:pass@localhost/db")
|
||||
t.Setenv("APP_URL", "https://example.com")
|
||||
|
||||
err := parseEnvConfig()
|
||||
err := parseAndValidateEnvConfig(t)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, DbProviderPostgres, EnvConfig.DbProvider)
|
||||
})
|
||||
|
||||
t.Run("should fail with invalid DB_PROVIDER", func(t *testing.T) {
|
||||
t.Run("should fail when ENCRYPTION_KEY is too short", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "invalid")
|
||||
t.Setenv("DB_CONNECTION_STRING", "test")
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("APP_URL", "http://localhost:3000")
|
||||
t.Setenv("ENCRYPTION_KEY", "short")
|
||||
|
||||
err := parseEnvConfig()
|
||||
err := parseAndValidateEnvConfig(t)
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, "invalid DB_PROVIDER value")
|
||||
assert.ErrorContains(t, err, "ENCRYPTION_KEY must be at least 16 bytes long")
|
||||
})
|
||||
|
||||
t.Run("should set default SQLite connection string when DB_CONNECTION_STRING is empty", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "sqlite")
|
||||
t.Setenv("APP_URL", "http://localhost:3000")
|
||||
|
||||
err := parseEnvConfig()
|
||||
err := parseAndValidateEnvConfig(t)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, defaultSqliteConnString, EnvConfig.DbConnectionString)
|
||||
})
|
||||
|
||||
t.Run("should fail when Postgres DB_CONNECTION_STRING is missing", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "postgres")
|
||||
t.Setenv("APP_URL", "http://localhost:3000")
|
||||
|
||||
err := parseEnvConfig()
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, "missing required env var 'DB_CONNECTION_STRING' for Postgres")
|
||||
})
|
||||
|
||||
t.Run("should fail with invalid APP_URL", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "sqlite")
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("APP_URL", "€://not-a-valid-url")
|
||||
|
||||
err := parseEnvConfig()
|
||||
err := parseAndValidateEnvConfig(t)
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, "APP_URL is not a valid URL")
|
||||
})
|
||||
|
||||
t.Run("should fail when APP_URL contains path", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "sqlite")
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("APP_URL", "http://localhost:3000/path")
|
||||
|
||||
err := parseEnvConfig()
|
||||
err := parseAndValidateEnvConfig(t)
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, "APP_URL must not contain a path")
|
||||
})
|
||||
|
||||
t.Run("should fail with invalid INTERNAL_APP_URL", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "sqlite")
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("INTERNAL_APP_URL", "€://not-a-valid-url")
|
||||
|
||||
err := parseEnvConfig()
|
||||
err := parseAndValidateEnvConfig(t)
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, "INTERNAL_APP_URL is not a valid URL")
|
||||
})
|
||||
|
||||
t.Run("should fail when INTERNAL_APP_URL contains path", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "sqlite")
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("INTERNAL_APP_URL", "http://localhost:3000/path")
|
||||
|
||||
err := parseEnvConfig()
|
||||
err := parseAndValidateEnvConfig(t)
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, "INTERNAL_APP_URL must not contain a path")
|
||||
})
|
||||
|
||||
t.Run("should default KEYS_STORAGE to 'file' when empty", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "sqlite")
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("APP_URL", "http://localhost:3000")
|
||||
|
||||
err := parseEnvConfig()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "file", EnvConfig.KeysStorage)
|
||||
})
|
||||
|
||||
t.Run("should fail when KEYS_STORAGE is 'database' but no encryption key", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "sqlite")
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("APP_URL", "http://localhost:3000")
|
||||
t.Setenv("KEYS_STORAGE", "database")
|
||||
|
||||
err := parseEnvConfig()
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, "ENCRYPTION_KEY must be non-empty when KEYS_STORAGE is database")
|
||||
})
|
||||
|
||||
t.Run("should accept valid KEYS_STORAGE values", func(t *testing.T) {
|
||||
validStorageTypes := []string{"file", "database"}
|
||||
|
||||
for _, storage := range validStorageTypes {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "sqlite")
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("APP_URL", "http://localhost:3000")
|
||||
t.Setenv("KEYS_STORAGE", storage)
|
||||
if storage == "database" {
|
||||
t.Setenv("ENCRYPTION_KEY", "test-key")
|
||||
}
|
||||
|
||||
err := parseEnvConfig()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, storage, EnvConfig.KeysStorage)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("should fail with invalid KEYS_STORAGE value", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "sqlite")
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("APP_URL", "http://localhost:3000")
|
||||
t.Setenv("KEYS_STORAGE", "invalid")
|
||||
|
||||
err := parseEnvConfig()
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, "invalid value for KEYS_STORAGE")
|
||||
})
|
||||
|
||||
t.Run("should parse boolean environment variables correctly", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "sqlite")
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("APP_URL", "http://localhost:3000")
|
||||
t.Setenv("UI_CONFIG_DISABLED", "true")
|
||||
@@ -178,7 +120,7 @@ func TestParseEnvConfig(t *testing.T) {
|
||||
t.Setenv("TRUST_PROXY", "true")
|
||||
t.Setenv("ANALYTICS_DISABLED", "false")
|
||||
|
||||
err := parseEnvConfig()
|
||||
err := parseAndValidateEnvConfig(t)
|
||||
require.NoError(t, err)
|
||||
assert.True(t, EnvConfig.UiConfigDisabled)
|
||||
assert.True(t, EnvConfig.MetricsEnabled)
|
||||
@@ -187,27 +129,84 @@ func TestParseEnvConfig(t *testing.T) {
|
||||
assert.False(t, EnvConfig.AnalyticsDisabled)
|
||||
})
|
||||
|
||||
t.Run("should default audit log retention days to 90", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "sqlite")
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("APP_URL", "http://localhost:3000")
|
||||
|
||||
err := parseEnvConfig()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 90, EnvConfig.AuditLogRetentionDays)
|
||||
})
|
||||
|
||||
t.Run("should parse audit log retention days override", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "sqlite")
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("APP_URL", "http://localhost:3000")
|
||||
t.Setenv("AUDIT_LOG_RETENTION_DAYS", "365")
|
||||
|
||||
err := parseEnvConfig()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, 365, EnvConfig.AuditLogRetentionDays)
|
||||
})
|
||||
|
||||
t.Run("should fail when AUDIT_LOG_RETENTION_DAYS is non-positive", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "sqlite")
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("APP_URL", "http://localhost:3000")
|
||||
t.Setenv("AUDIT_LOG_RETENTION_DAYS", "0")
|
||||
|
||||
err := parseAndValidateEnvConfig(t)
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, "AUDIT_LOG_RETENTION_DAYS must be greater than 0")
|
||||
})
|
||||
|
||||
t.Run("should parse string environment variables correctly", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_PROVIDER", "postgres")
|
||||
t.Setenv("DB_CONNECTION_STRING", "postgres://test")
|
||||
t.Setenv("APP_URL", "https://prod.example.com")
|
||||
t.Setenv("APP_ENV", "STAGING")
|
||||
t.Setenv("APP_ENV", "PRODUCTION")
|
||||
t.Setenv("UPLOAD_PATH", "/custom/uploads")
|
||||
t.Setenv("KEYS_PATH", "/custom/keys")
|
||||
t.Setenv("PORT", "8080")
|
||||
t.Setenv("HOST", "LOCALHOST")
|
||||
t.Setenv("UNIX_SOCKET", "/tmp/app.sock")
|
||||
t.Setenv("MAXMIND_LICENSE_KEY", "test-license")
|
||||
t.Setenv("GEOLITE_DB_PATH", "/custom/geolite.mmdb")
|
||||
|
||||
err := parseEnvConfig()
|
||||
err := parseAndValidateEnvConfig(t)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "staging", EnvConfig.AppEnv) // lowercased
|
||||
assert.Equal(t, AppEnvProduction, EnvConfig.AppEnv) // lowercased
|
||||
assert.Equal(t, "/custom/uploads", EnvConfig.UploadPath)
|
||||
assert.Equal(t, "8080", EnvConfig.Port)
|
||||
assert.Equal(t, "localhost", EnvConfig.Host) // lowercased
|
||||
})
|
||||
|
||||
t.Run("should normalize file backend and default upload path", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("APP_URL", "http://localhost:3000")
|
||||
t.Setenv("FILE_BACKEND", "FILESYSTEM")
|
||||
t.Setenv("UPLOAD_PATH", "")
|
||||
|
||||
err := parseAndValidateEnvConfig(t)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "filesystem", EnvConfig.FileBackend)
|
||||
assert.Equal(t, defaultFsUploadPath, EnvConfig.UploadPath)
|
||||
})
|
||||
|
||||
t.Run("should fail with invalid FILE_BACKEND value", func(t *testing.T) {
|
||||
EnvConfig = defaultConfig()
|
||||
t.Setenv("DB_CONNECTION_STRING", "file:test.db")
|
||||
t.Setenv("APP_URL", "http://localhost:3000")
|
||||
t.Setenv("FILE_BACKEND", "invalid")
|
||||
|
||||
err := parseAndValidateEnvConfig(t)
|
||||
require.Error(t, err)
|
||||
assert.ErrorContains(t, err, "invalid FILE_BACKEND value")
|
||||
})
|
||||
}
|
||||
|
||||
func TestPrepareEnvConfig_FileBasedAndToLower(t *testing.T) {
|
||||
@@ -241,7 +240,7 @@ func TestPrepareEnvConfig_FileBasedAndToLower(t *testing.T) {
|
||||
err := prepareEnvConfig(&config)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "staging", config.AppEnv)
|
||||
assert.Equal(t, AppEnv("staging"), config.AppEnv)
|
||||
assert.Equal(t, "localhost", config.Host)
|
||||
assert.Equal(t, []byte(encryptionKeyContent), config.EncryptionKey)
|
||||
assert.Equal(t, dbConnContent, config.DbConnectionString)
|
||||
|
||||
@@ -38,6 +38,13 @@ type TokenInvalidOrExpiredError struct{}
|
||||
func (e *TokenInvalidOrExpiredError) Error() string { return "token is invalid or expired" }
|
||||
func (e *TokenInvalidOrExpiredError) HttpStatusCode() int { return 400 }
|
||||
|
||||
type DeviceCodeInvalid struct{}
|
||||
|
||||
func (e *DeviceCodeInvalid) Error() string {
|
||||
return "one time access code must be used on the device it was generated for"
|
||||
}
|
||||
func (e *DeviceCodeInvalid) HttpStatusCode() int { return 400 }
|
||||
|
||||
type TokenInvalidError struct{}
|
||||
|
||||
func (e *TokenInvalidError) Error() string {
|
||||
@@ -388,3 +395,13 @@ func (e *UserEmailNotSetError) Error() string {
|
||||
func (e *UserEmailNotSetError) HttpStatusCode() int {
|
||||
return http.StatusBadRequest
|
||||
}
|
||||
|
||||
type ImageNotFoundError struct{}
|
||||
|
||||
func (e *ImageNotFoundError) Error() string {
|
||||
return "Image not found"
|
||||
}
|
||||
|
||||
func (e *ImageNotFoundError) HttpStatusCode() int {
|
||||
return http.StatusNotFound
|
||||
}
|
||||
|
||||
@@ -45,15 +45,11 @@ func NewApiKeyController(group *gin.RouterGroup, authMiddleware *middleware.Auth
|
||||
// @Success 200 {object} dto.Paginated[dto.ApiKeyDto]
|
||||
// @Router /api/api-keys [get]
|
||||
func (c *ApiKeyController) listApiKeysHandler(ctx *gin.Context) {
|
||||
listRequestOptions := utils.ParseListRequestOptions(ctx)
|
||||
|
||||
userID := ctx.GetString("userID")
|
||||
|
||||
var sortedPaginationRequest utils.SortedPaginationRequest
|
||||
if err := ctx.ShouldBindQuery(&sortedPaginationRequest); err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
apiKeys, pagination, err := c.apiKeyService.ListApiKeys(ctx.Request.Context(), userID, sortedPaginationRequest)
|
||||
apiKeys, pagination, err := c.apiKeyService.ListApiKeys(ctx.Request.Context(), userID, listRequestOptions)
|
||||
if err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
|
||||
@@ -23,12 +23,18 @@ func NewAppImagesController(
|
||||
}
|
||||
|
||||
group.GET("/application-images/logo", controller.getLogoHandler)
|
||||
group.GET("/application-images/email", controller.getEmailLogoHandler)
|
||||
group.GET("/application-images/background", controller.getBackgroundImageHandler)
|
||||
group.GET("/application-images/favicon", controller.getFaviconHandler)
|
||||
group.GET("/application-images/default-profile-picture", authMiddleware.Add(), controller.getDefaultProfilePicture)
|
||||
|
||||
group.PUT("/application-images/logo", authMiddleware.Add(), controller.updateLogoHandler)
|
||||
group.PUT("/application-images/email", authMiddleware.Add(), controller.updateEmailLogoHandler)
|
||||
group.PUT("/application-images/background", authMiddleware.Add(), controller.updateBackgroundImageHandler)
|
||||
group.PUT("/application-images/favicon", authMiddleware.Add(), controller.updateFaviconHandler)
|
||||
group.PUT("/application-images/default-profile-picture", authMiddleware.Add(), controller.updateDefaultProfilePicture)
|
||||
|
||||
group.DELETE("/application-images/default-profile-picture", authMiddleware.Add(), controller.deleteDefaultProfilePicture)
|
||||
}
|
||||
|
||||
type AppImagesController struct {
|
||||
@@ -55,6 +61,18 @@ func (c *AppImagesController) getLogoHandler(ctx *gin.Context) {
|
||||
c.getImage(ctx, imageName)
|
||||
}
|
||||
|
||||
// getEmailLogoHandler godoc
|
||||
// @Summary Get email logo image
|
||||
// @Description Get the email logo image for use in emails
|
||||
// @Tags Application Images
|
||||
// @Produce image/png
|
||||
// @Produce image/jpeg
|
||||
// @Success 200 {file} binary "Email logo image"
|
||||
// @Router /api/application-images/email [get]
|
||||
func (c *AppImagesController) getEmailLogoHandler(ctx *gin.Context) {
|
||||
c.getImage(ctx, "logoEmail")
|
||||
}
|
||||
|
||||
// getBackgroundImageHandler godoc
|
||||
// @Summary Get background image
|
||||
// @Description Get the background image for the application
|
||||
@@ -78,6 +96,18 @@ func (c *AppImagesController) getFaviconHandler(ctx *gin.Context) {
|
||||
c.getImage(ctx, "favicon")
|
||||
}
|
||||
|
||||
// getDefaultProfilePicture godoc
|
||||
// @Summary Get default profile picture image
|
||||
// @Description Get the default profile picture image for the application
|
||||
// @Tags Application Images
|
||||
// @Produce image/png
|
||||
// @Produce image/jpeg
|
||||
// @Success 200 {file} binary "Default profile picture image"
|
||||
// @Router /api/application-images/default-profile-picture [get]
|
||||
func (c *AppImagesController) getDefaultProfilePicture(ctx *gin.Context) {
|
||||
c.getImage(ctx, "default-profile-picture")
|
||||
}
|
||||
|
||||
// updateLogoHandler godoc
|
||||
// @Summary Update logo
|
||||
// @Description Update the application logo
|
||||
@@ -100,7 +130,38 @@ func (c *AppImagesController) updateLogoHandler(ctx *gin.Context) {
|
||||
imageName = "logoDark"
|
||||
}
|
||||
|
||||
if err := c.appImagesService.UpdateImage(file, imageName); err != nil {
|
||||
if err := c.appImagesService.UpdateImage(ctx.Request.Context(), file, imageName); err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Status(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// updateEmailLogoHandler godoc
|
||||
// @Summary Update email logo
|
||||
// @Description Update the email logo for use in emails
|
||||
// @Tags Application Images
|
||||
// @Accept multipart/form-data
|
||||
// @Param file formData file true "Email logo image file"
|
||||
// @Success 204 "No Content"
|
||||
// @Router /api/application-images/email [put]
|
||||
func (c *AppImagesController) updateEmailLogoHandler(ctx *gin.Context) {
|
||||
file, err := ctx.FormFile("file")
|
||||
if err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
fileType := utils.GetFileExtension(file.Filename)
|
||||
mimeType := utils.GetImageMimeType(fileType)
|
||||
|
||||
if mimeType != "image/png" && mimeType != "image/jpeg" {
|
||||
_ = ctx.Error(&common.WrongFileTypeError{ExpectedFileType: ".png or .jpg/jpeg"})
|
||||
return
|
||||
}
|
||||
|
||||
if err := c.appImagesService.UpdateImage(ctx.Request.Context(), file, "logoEmail"); err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
@@ -123,7 +184,7 @@ func (c *AppImagesController) updateBackgroundImageHandler(ctx *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
if err := c.appImagesService.UpdateImage(file, "background"); err != nil {
|
||||
if err := c.appImagesService.UpdateImage(ctx.Request.Context(), file, "background"); err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
@@ -152,7 +213,7 @@ func (c *AppImagesController) updateFaviconHandler(ctx *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
if err := c.appImagesService.UpdateImage(file, "favicon"); err != nil {
|
||||
if err := c.appImagesService.UpdateImage(ctx.Request.Context(), file, "favicon"); err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
@@ -161,13 +222,52 @@ func (c *AppImagesController) updateFaviconHandler(ctx *gin.Context) {
|
||||
}
|
||||
|
||||
func (c *AppImagesController) getImage(ctx *gin.Context, name string) {
|
||||
imagePath, mimeType, err := c.appImagesService.GetImage(name)
|
||||
reader, size, mimeType, err := c.appImagesService.GetImage(ctx.Request.Context(), name)
|
||||
if err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
ctx.Header("Content-Type", mimeType)
|
||||
utils.SetCacheControlHeader(ctx, 15*time.Minute, 24*time.Hour)
|
||||
ctx.DataFromReader(http.StatusOK, size, mimeType, reader, nil)
|
||||
}
|
||||
|
||||
// updateDefaultProfilePicture godoc
|
||||
// @Summary Update default profile picture image
|
||||
// @Description Update the default profile picture image
|
||||
// @Tags Application Images
|
||||
// @Accept multipart/form-data
|
||||
// @Param file formData file true "Profile picture image file"
|
||||
// @Success 204 "No Content"
|
||||
// @Router /api/application-images/default-profile-picture [put]
|
||||
func (c *AppImagesController) updateDefaultProfilePicture(ctx *gin.Context) {
|
||||
file, err := ctx.FormFile("file")
|
||||
if err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Header("Content-Type", mimeType)
|
||||
utils.SetCacheControlHeader(ctx, 15*time.Minute, 24*time.Hour)
|
||||
ctx.File(imagePath)
|
||||
if err := c.appImagesService.UpdateImage(ctx.Request.Context(), file, "default-profile-picture"); err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Status(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// deleteDefaultProfilePicture godoc
|
||||
// @Summary Delete default profile picture image
|
||||
// @Description Delete the default profile picture image
|
||||
// @Tags Application Images
|
||||
// @Success 204 "No Content"
|
||||
// @Router /api/application-images/default-profile-picture [delete]
|
||||
func (c *AppImagesController) deleteDefaultProfilePicture(ctx *gin.Context) {
|
||||
if err := c.appImagesService.DeleteImage(ctx.Request.Context(), "default-profile-picture"); err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Status(http.StatusNoContent)
|
||||
}
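For reference, the image routes registered above (including the new /api/application-images/email endpoint) are plain GET endpoints, so they can be exercised with a minimal Go client. This is only an illustrative sketch; the base URL is a placeholder.

package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	baseURL := "https://your-pocket-id-domain.com" // placeholder
	resp, err := http.Get(baseURL + "/api/application-images/email")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	// The handler streams the stored image, so the Content-Type should be image/png or image/jpeg.
	fmt.Println(resp.Status, resp.Header.Get("Content-Type"))
}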
@@ -41,18 +41,12 @@ type AuditLogController struct {
|
||||
// @Success 200 {object} dto.Paginated[dto.AuditLogDto]
|
||||
// @Router /api/audit-logs [get]
|
||||
func (alc *AuditLogController) listAuditLogsForUserHandler(c *gin.Context) {
|
||||
var sortedPaginationRequest utils.SortedPaginationRequest
|
||||
|
||||
err := c.ShouldBindQuery(&sortedPaginationRequest)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
listRequestOptions := utils.ParseListRequestOptions(c)
|
||||
|
||||
userID := c.GetString("userID")
|
||||
|
||||
// Fetch audit logs for the user
|
||||
logs, pagination, err := alc.auditLogService.ListAuditLogsForUser(c.Request.Context(), userID, sortedPaginationRequest)
|
||||
logs, pagination, err := alc.auditLogService.ListAuditLogsForUser(c.Request.Context(), userID, listRequestOptions)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -86,26 +80,12 @@ func (alc *AuditLogController) listAuditLogsForUserHandler(c *gin.Context) {
|
||||
// @Param pagination[limit] query int false "Number of items per page" default(20)
|
||||
// @Param sort[column] query string false "Column to sort by"
|
||||
// @Param sort[direction] query string false "Sort direction (asc or desc)" default("asc")
|
||||
// @Param filters[userId] query string false "Filter by user ID"
|
||||
// @Param filters[event] query string false "Filter by event type"
|
||||
// @Param filters[clientName] query string false "Filter by client name"
|
||||
// @Param filters[location] query string false "Filter by location type (external or internal)"
|
||||
// @Success 200 {object} dto.Paginated[dto.AuditLogDto]
|
||||
// @Router /api/audit-logs/all [get]
|
||||
func (alc *AuditLogController) listAllAuditLogsHandler(c *gin.Context) {
|
||||
var sortedPaginationRequest utils.SortedPaginationRequest
|
||||
if err := c.ShouldBindQuery(&sortedPaginationRequest); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
listRequestOptions := utils.ParseListRequestOptions(c)
|
||||
|
||||
var filters dto.AuditLogFilterDto
|
||||
if err := c.ShouldBindQuery(&filters); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
logs, pagination, err := alc.auditLogService.ListAllAuditLogs(c.Request.Context(), sortedPaginationRequest, filters)
|
||||
logs, pagination, err := alc.auditLogService.ListAllAuditLogs(c.Request.Context(), listRequestOptions)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
|
||||
@@ -40,6 +40,11 @@ func (tc *TestController) resetAndSeedHandler(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
if err := tc.TestService.ResetLock(c.Request.Context()); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := tc.TestService.ResetApplicationImages(c.Request.Context()); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -69,8 +74,6 @@ func (tc *TestController) resetAndSeedHandler(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
tc.TestService.SetJWTKeys()
|
||||
|
||||
c.Status(http.StatusNoContent)
|
||||
}
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -62,6 +63,8 @@ func NewOidcController(group *gin.RouterGroup, authMiddleware *middleware.AuthMi
|
||||
|
||||
group.GET("/oidc/users/me/clients", authMiddleware.WithAdminNotRequired().Add(), oc.listOwnAccessibleClientsHandler)
|
||||
|
||||
group.GET("/oidc/clients/:id/scim-service-provider", authMiddleware.Add(), oc.getClientScimServiceProviderHandler)
|
||||
|
||||
}
|
||||
|
||||
type OidcController struct {
|
||||
@@ -357,6 +360,7 @@ func (oc *OidcController) getClientMetaDataHandler(c *gin.Context) {
|
||||
clientDto := dto.OidcClientMetaDataDto{}
|
||||
err = dto.MapStruct(client, &clientDto)
|
||||
if err == nil {
|
||||
clientDto.HasDarkLogo = client.HasDarkLogo()
|
||||
c.JSON(http.StatusOK, clientDto)
|
||||
return
|
||||
}
|
||||
@@ -403,13 +407,9 @@ func (oc *OidcController) getClientHandler(c *gin.Context) {
|
||||
// @Router /api/oidc/clients [get]
|
||||
func (oc *OidcController) listClientsHandler(c *gin.Context) {
|
||||
searchTerm := c.Query("search")
|
||||
var sortedPaginationRequest utils.SortedPaginationRequest
|
||||
if err := c.ShouldBindQuery(&sortedPaginationRequest); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
listRequestOptions := utils.ParseListRequestOptions(c)
|
||||
|
||||
clients, pagination, err := oc.oidcService.ListClients(c.Request.Context(), searchTerm, sortedPaginationRequest)
|
||||
clients, pagination, err := oc.oidcService.ListClients(c.Request.Context(), searchTerm, listRequestOptions)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -423,6 +423,7 @@ func (oc *OidcController) listClientsHandler(c *gin.Context) {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
clientDto.HasDarkLogo = client.HasDarkLogo()
|
||||
clientDto.AllowedUserGroupsCount, err = oc.oidcService.GetAllowedGroupsCountOfClient(c, client.ID)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
@@ -543,19 +544,23 @@ func (oc *OidcController) createClientSecretHandler(c *gin.Context) {
|
||||
// @Produce image/jpeg
|
||||
// @Produce image/svg+xml
|
||||
// @Param id path string true "Client ID"
|
||||
// @Param light query boolean false "Light mode logo (true) or dark mode logo (false)"
|
||||
// @Success 200 {file} binary "Logo image"
|
||||
// @Router /api/oidc/clients/{id}/logo [get]
|
||||
func (oc *OidcController) getClientLogoHandler(c *gin.Context) {
|
||||
imagePath, mimeType, err := oc.oidcService.GetClientLogo(c.Request.Context(), c.Param("id"))
|
||||
lightLogo, _ := strconv.ParseBool(c.DefaultQuery("light", "true"))
|
||||
|
||||
reader, size, mimeType, err := oc.oidcService.GetClientLogo(c.Request.Context(), c.Param("id"), lightLogo)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
utils.SetCacheControlHeader(c, 15*time.Minute, 12*time.Hour)
|
||||
|
||||
c.Header("Content-Type", mimeType)
|
||||
c.File(imagePath)
|
||||
c.DataFromReader(http.StatusOK, size, mimeType, reader, nil)
|
||||
}
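One detail worth noting in the handler above: the light query parameter is read with strconv.ParseBool and the error is discarded, so an unparsable value resolves to false. A short illustration of the standard-library behavior (not project code):

// ParseBool only accepts "1", "t", "T", "TRUE", "true", "True",
// "0", "f", "F", "FALSE", "false", "False"; anything else returns an error.
light, err := strconv.ParseBool("yes")
// light == false, err != nil — with the error ignored, the request behaves
// like light=false, i.e. the dark-mode logo per the @Param description above.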
// updateClientLogoHandler godoc
|
||||
@@ -565,6 +570,7 @@ func (oc *OidcController) getClientLogoHandler(c *gin.Context) {
|
||||
// @Accept multipart/form-data
|
||||
// @Param id path string true "Client ID"
|
||||
// @Param file formData file true "Logo image file (PNG, JPG, or SVG)"
|
||||
// @Param light query boolean false "Light mode logo (true) or dark mode logo (false)"
|
||||
// @Success 204 "No Content"
|
||||
// @Router /api/oidc/clients/{id}/logo [post]
|
||||
func (oc *OidcController) updateClientLogoHandler(c *gin.Context) {
|
||||
@@ -574,7 +580,9 @@ func (oc *OidcController) updateClientLogoHandler(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
err = oc.oidcService.UpdateClientLogo(c.Request.Context(), c.Param("id"), file)
|
||||
lightLogo, _ := strconv.ParseBool(c.DefaultQuery("light", "true"))
|
||||
|
||||
err = oc.oidcService.UpdateClientLogo(c.Request.Context(), c.Param("id"), file, lightLogo)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -588,10 +596,19 @@ func (oc *OidcController) updateClientLogoHandler(c *gin.Context) {
|
||||
// @Description Delete the logo for an OIDC client
|
||||
// @Tags OIDC
|
||||
// @Param id path string true "Client ID"
|
||||
// @Param light query boolean false "Light mode logo (true) or dark mode logo (false)"
|
||||
// @Success 204 "No Content"
|
||||
// @Router /api/oidc/clients/{id}/logo [delete]
|
||||
func (oc *OidcController) deleteClientLogoHandler(c *gin.Context) {
|
||||
err := oc.oidcService.DeleteClientLogo(c.Request.Context(), c.Param("id"))
|
||||
var err error
|
||||
|
||||
lightLogo, _ := strconv.ParseBool(c.DefaultQuery("light", "true"))
|
||||
if lightLogo {
|
||||
err = oc.oidcService.DeleteClientLogo(c.Request.Context(), c.Param("id"))
|
||||
} else {
|
||||
err = oc.oidcService.DeleteClientDarkLogo(c.Request.Context(), c.Param("id"))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -628,6 +645,7 @@ func (oc *OidcController) updateAllowedUserGroupsHandler(c *gin.Context) {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
oidcClientDto.HasDarkLogo = oidcClient.HasDarkLogo()
|
||||
|
||||
c.JSON(http.StatusOK, oidcClientDto)
|
||||
}
|
||||
@@ -685,12 +703,9 @@ func (oc *OidcController) listAuthorizedClientsHandler(c *gin.Context) {
|
||||
}
|
||||
|
||||
func (oc *OidcController) listAuthorizedClients(c *gin.Context, userID string) {
|
||||
var sortedPaginationRequest utils.SortedPaginationRequest
|
||||
if err := c.ShouldBindQuery(&sortedPaginationRequest); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
authorizedClients, pagination, err := oc.oidcService.ListAuthorizedClients(c.Request.Context(), userID, sortedPaginationRequest)
|
||||
listRequestOptions := utils.ParseListRequestOptions(c)
|
||||
|
||||
authorizedClients, pagination, err := oc.oidcService.ListAuthorizedClients(c.Request.Context(), userID, listRequestOptions)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -741,15 +756,11 @@ func (oc *OidcController) revokeOwnClientAuthorizationHandler(c *gin.Context) {
|
||||
// @Success 200 {object} dto.Paginated[dto.AccessibleOidcClientDto]
|
||||
// @Router /api/oidc/users/me/clients [get]
|
||||
func (oc *OidcController) listOwnAccessibleClientsHandler(c *gin.Context) {
|
||||
listRequestOptions := utils.ParseListRequestOptions(c)
|
||||
|
||||
userID := c.GetString("userID")
|
||||
|
||||
var sortedPaginationRequest utils.SortedPaginationRequest
|
||||
if err := c.ShouldBindQuery(&sortedPaginationRequest); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
clients, pagination, err := oc.oidcService.ListAccessibleOidcClients(c.Request.Context(), userID, sortedPaginationRequest)
|
||||
clients, pagination, err := oc.oidcService.ListAccessibleOidcClients(c.Request.Context(), userID, listRequestOptions)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -836,3 +847,29 @@ func (oc *OidcController) getClientPreviewHandler(c *gin.Context) {
|
||||
|
||||
c.JSON(http.StatusOK, preview)
|
||||
}
|
||||
|
||||
// getClientScimServiceProviderHandler godoc
|
||||
// @Summary Get SCIM service provider
|
||||
// @Description Get the SCIM service provider configuration for an OIDC client
|
||||
// @Tags OIDC
|
||||
// @Produce json
|
||||
// @Param id path string true "Client ID"
|
||||
// @Success 200 {object} dto.ScimServiceProviderDTO "SCIM service provider configuration"
|
||||
// @Router /api/oidc/clients/{id}/scim-service-provider [get]
|
||||
func (oc *OidcController) getClientScimServiceProviderHandler(c *gin.Context) {
|
||||
clientID := c.Param("id")
|
||||
|
||||
provider, err := oc.oidcService.GetClientScimServiceProvider(c.Request.Context(), clientID)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
var providerDto dto.ScimServiceProviderDTO
|
||||
if err := dto.MapStruct(provider, &providerDto); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, providerDto)
|
||||
}
|
||||
|
||||
backend/internal/controller/scim_controller.go (new file, 122 lines)
@@ -0,0 +1,122 @@
|
||||
package controller
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/dto"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/middleware"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/service"
|
||||
)
|
||||
|
||||
func NewScimController(group *gin.RouterGroup, authMiddleware *middleware.AuthMiddleware, scimService *service.ScimService) {
|
||||
ugc := ScimController{
|
||||
scimService: scimService,
|
||||
}
|
||||
|
||||
group.POST("/scim/service-provider", authMiddleware.Add(), ugc.createServiceProviderHandler)
|
||||
group.POST("/scim/service-provider/:id/sync", authMiddleware.Add(), ugc.syncServiceProviderHandler)
|
||||
group.PUT("/scim/service-provider/:id", authMiddleware.Add(), ugc.updateServiceProviderHandler)
|
||||
group.DELETE("/scim/service-provider/:id", authMiddleware.Add(), ugc.deleteServiceProviderHandler)
|
||||
}
|
||||
|
||||
type ScimController struct {
|
||||
scimService *service.ScimService
|
||||
}
|
||||
|
||||
// syncServiceProviderHandler godoc
|
||||
// @Summary Sync SCIM service provider
|
||||
// @Description Trigger synchronization for a SCIM service provider
|
||||
// @Tags SCIM
|
||||
// @Param id path string true "Service Provider ID"
|
||||
// @Success 200 "OK"
|
||||
// @Router /api/scim/service-provider/{id}/sync [post]
|
||||
func (c *ScimController) syncServiceProviderHandler(ctx *gin.Context) {
|
||||
err := c.scimService.SyncServiceProvider(ctx.Request.Context(), ctx.Param("id"))
|
||||
if err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Status(http.StatusOK)
|
||||
}
|
||||
|
||||
// createServiceProviderHandler godoc
|
||||
// @Summary Create SCIM service provider
|
||||
// @Description Create a new SCIM service provider
|
||||
// @Tags SCIM
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param serviceProvider body dto.ScimServiceProviderCreateDTO true "SCIM service provider information"
|
||||
// @Success 201 {object} dto.ScimServiceProviderDTO "Created SCIM service provider"
|
||||
// @Router /api/scim/service-provider [post]
|
||||
func (c *ScimController) createServiceProviderHandler(ctx *gin.Context) {
|
||||
var input dto.ScimServiceProviderCreateDTO
|
||||
if err := ctx.ShouldBindJSON(&input); err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
provider, err := c.scimService.CreateServiceProvider(ctx.Request.Context(), &input)
|
||||
if err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
var providerDTO dto.ScimServiceProviderDTO
|
||||
if err := dto.MapStruct(provider, &providerDTO); err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
ctx.JSON(http.StatusCreated, providerDTO)
|
||||
}
|
||||
|
||||
// updateServiceProviderHandler godoc
|
||||
// @Summary Update SCIM service provider
|
||||
// @Description Update an existing SCIM service provider
|
||||
// @Tags SCIM
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param id path string true "Service Provider ID"
|
||||
// @Param serviceProvider body dto.ScimServiceProviderCreateDTO true "SCIM service provider information"
|
||||
// @Success 200 {object} dto.ScimServiceProviderDTO "Updated SCIM service provider"
|
||||
// @Router /api/scim/service-provider/{id} [put]
|
||||
func (c *ScimController) updateServiceProviderHandler(ctx *gin.Context) {
|
||||
var input dto.ScimServiceProviderCreateDTO
|
||||
if err := ctx.ShouldBindJSON(&input); err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
provider, err := c.scimService.UpdateServiceProvider(ctx.Request.Context(), ctx.Param("id"), &input)
|
||||
if err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
var providerDTO dto.ScimServiceProviderDTO
|
||||
if err := dto.MapStruct(provider, &providerDTO); err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
ctx.JSON(http.StatusOK, providerDTO)
|
||||
}
|
||||
|
||||
// deleteServiceProviderHandler godoc
|
||||
// @Summary Delete SCIM service provider
|
||||
// @Description Delete a SCIM service provider by ID
|
||||
// @Tags SCIM
|
||||
// @Param id path string true "Service Provider ID"
|
||||
// @Success 204 "No Content"
|
||||
// @Router /api/scim/service-provider/{id} [delete]
|
||||
func (c *ScimController) deleteServiceProviderHandler(ctx *gin.Context) {
|
||||
err := c.scimService.DeleteServiceProvider(ctx.Request.Context(), ctx.Param("id"))
|
||||
if err != nil {
|
||||
_ = ctx.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
ctx.Status(http.StatusNoContent)
|
||||
}
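A hedged sketch of how the new SCIM service-provider endpoint might be called from Go. The request body mirrors dto.ScimServiceProviderCreateDTO (endpoint, token, oidcClientId); the base URL and field values are placeholders, and admin authentication — which the routes above require via the auth middleware — is omitted.

package main

import (
	"bytes"
	"encoding/json"
	"log"
	"net/http"
)

func main() {
	body, _ := json.Marshal(map[string]string{
		"endpoint":     "https://scim.example.com/v2", // assumed SCIM base URL of the provider
		"token":        "provider-bearer-token",
		"oidcClientId": "your-oidc-client-id",
	})
	req, err := http.NewRequest(http.MethodPost, "https://your-pocket-id-domain.com/api/scim/service-provider", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Admin authentication is required here but intentionally left out of the sketch.
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Status) // 201 Created on success
}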
@@ -72,7 +72,7 @@ type UserController struct {
|
||||
// @Description Retrieve all groups a specific user belongs to
|
||||
// @Tags Users,User Groups
|
||||
// @Param id path string true "User ID"
|
||||
// @Success 200 {array} dto.UserGroupDtoWithUsers
|
||||
// @Success 200 {array} dto.UserGroupDto
|
||||
// @Router /api/users/{id}/groups [get]
|
||||
func (uc *UserController) getUserGroupsHandler(c *gin.Context) {
|
||||
userID := c.Param("id")
|
||||
@@ -82,7 +82,7 @@ func (uc *UserController) getUserGroupsHandler(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
var groupsDto []dto.UserGroupDtoWithUsers
|
||||
var groupsDto []dto.UserGroupDto
|
||||
if err := dto.MapStructList(groups, &groupsDto); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -104,13 +104,9 @@ func (uc *UserController) getUserGroupsHandler(c *gin.Context) {
|
||||
// @Router /api/users [get]
|
||||
func (uc *UserController) listUsersHandler(c *gin.Context) {
|
||||
searchTerm := c.Query("search")
|
||||
var sortedPaginationRequest utils.SortedPaginationRequest
|
||||
if err := c.ShouldBindQuery(&sortedPaginationRequest); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
listRequestOptions := utils.ParseListRequestOptions(c)
|
||||
|
||||
users, pagination, err := uc.userService.ListUsers(c.Request.Context(), searchTerm, sortedPaginationRequest)
|
||||
users, pagination, err := uc.userService.ListUsers(c.Request.Context(), searchTerm, listRequestOptions)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -290,7 +286,7 @@ func (uc *UserController) updateUserProfilePictureHandler(c *gin.Context) {
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
if err := uc.userService.UpdateProfilePicture(userID, file); err != nil {
|
||||
if err := uc.userService.UpdateProfilePicture(c.Request.Context(), userID, file); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
@@ -321,7 +317,7 @@ func (uc *UserController) updateCurrentUserProfilePictureHandler(c *gin.Context)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
if err := uc.userService.UpdateProfilePicture(userID, file); err != nil {
|
||||
if err := uc.userService.UpdateProfilePicture(c.Request.Context(), userID, file); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
@@ -395,12 +391,13 @@ func (uc *UserController) RequestOneTimeAccessEmailAsUnauthenticatedUserHandler(
|
||||
return
|
||||
}
|
||||
|
||||
err := uc.userService.RequestOneTimeAccessEmailAsUnauthenticatedUser(c.Request.Context(), input.Email, input.RedirectPath)
|
||||
deviceToken, err := uc.userService.RequestOneTimeAccessEmailAsUnauthenticatedUser(c.Request.Context(), input.Email, input.RedirectPath)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
cookie.AddDeviceTokenCookie(c, deviceToken)
|
||||
c.Status(http.StatusNoContent)
|
||||
}
|
||||
|
||||
@@ -444,7 +441,8 @@ func (uc *UserController) RequestOneTimeAccessEmailAsAdminHandler(c *gin.Context
|
||||
// @Success 200 {object} dto.UserDto
|
||||
// @Router /api/one-time-access-token/{token} [post]
|
||||
func (uc *UserController) exchangeOneTimeAccessTokenHandler(c *gin.Context) {
|
||||
user, token, err := uc.userService.ExchangeOneTimeAccessToken(c.Request.Context(), c.Param("token"), c.ClientIP(), c.Request.UserAgent())
|
||||
deviceToken, _ := c.Cookie(cookie.DeviceTokenCookieName)
|
||||
user, token, err := uc.userService.ExchangeOneTimeAccessToken(c.Request.Context(), c.Param("token"), deviceToken, c.ClientIP(), c.Request.UserAgent())
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -547,7 +545,7 @@ func (uc *UserController) createSignupTokenHandler(c *gin.Context) {
|
||||
ttl = defaultSignupTokenDuration
|
||||
}
|
||||
|
||||
signupToken, err := uc.userService.CreateSignupToken(c.Request.Context(), ttl, input.UsageLimit)
|
||||
signupToken, err := uc.userService.CreateSignupToken(c.Request.Context(), ttl, input.UsageLimit, input.UserGroupIDs)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -574,13 +572,9 @@ func (uc *UserController) createSignupTokenHandler(c *gin.Context) {
|
||||
// @Success 200 {object} dto.Paginated[dto.SignupTokenDto]
|
||||
// @Router /api/signup-tokens [get]
|
||||
func (uc *UserController) listSignupTokensHandler(c *gin.Context) {
|
||||
var sortedPaginationRequest utils.SortedPaginationRequest
|
||||
if err := c.ShouldBindQuery(&sortedPaginationRequest); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
listRequestOptions := utils.ParseListRequestOptions(c)
|
||||
|
||||
tokens, pagination, err := uc.userService.ListSignupTokens(c.Request.Context(), sortedPaginationRequest)
|
||||
tokens, pagination, err := uc.userService.ListSignupTokens(c.Request.Context(), listRequestOptions)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -695,7 +689,7 @@ func (uc *UserController) updateUser(c *gin.Context, updateOwnUser bool) {
|
||||
func (uc *UserController) resetUserProfilePictureHandler(c *gin.Context) {
|
||||
userID := c.Param("id")
|
||||
|
||||
if err := uc.userService.ResetProfilePicture(userID); err != nil {
|
||||
if err := uc.userService.ResetProfilePicture(c.Request.Context(), userID); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
@@ -713,7 +707,7 @@ func (uc *UserController) resetUserProfilePictureHandler(c *gin.Context) {
|
||||
func (uc *UserController) resetCurrentUserProfilePictureHandler(c *gin.Context) {
|
||||
userID := c.GetString("userID")
|
||||
|
||||
if err := uc.userService.ResetProfilePicture(userID); err != nil {
|
||||
if err := uc.userService.ResetProfilePicture(c.Request.Context(), userID); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -28,6 +28,7 @@ func NewUserGroupController(group *gin.RouterGroup, authMiddleware *middleware.A
|
||||
userGroupsGroup.PUT("/:id", ugc.update)
|
||||
userGroupsGroup.DELETE("/:id", ugc.delete)
|
||||
userGroupsGroup.PUT("/:id/users", ugc.updateUsers)
|
||||
userGroupsGroup.PUT("/:id/allowed-oidc-clients", ugc.updateAllowedOidcClients)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -44,33 +45,27 @@ type UserGroupController struct {
|
||||
// @Param pagination[limit] query int false "Number of items per page" default(20)
|
||||
// @Param sort[column] query string false "Column to sort by"
|
||||
// @Param sort[direction] query string false "Sort direction (asc or desc)" default("asc")
|
||||
// @Success 200 {object} dto.Paginated[dto.UserGroupDtoWithUserCount]
|
||||
// @Success 200 {object} dto.Paginated[dto.UserGroupMinimalDto]
|
||||
// @Router /api/user-groups [get]
|
||||
func (ugc *UserGroupController) list(c *gin.Context) {
|
||||
ctx := c.Request.Context()
|
||||
|
||||
searchTerm := c.Query("search")
|
||||
var sortedPaginationRequest utils.SortedPaginationRequest
|
||||
if err := c.ShouldBindQuery(&sortedPaginationRequest); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
listRequestOptions := utils.ParseListRequestOptions(c)
|
||||
|
||||
groups, pagination, err := ugc.UserGroupService.List(ctx, searchTerm, sortedPaginationRequest)
|
||||
groups, pagination, err := ugc.UserGroupService.List(c, searchTerm, listRequestOptions)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
// Map the user groups to DTOs
|
||||
var groupsDto = make([]dto.UserGroupDtoWithUserCount, len(groups))
|
||||
var groupsDto = make([]dto.UserGroupMinimalDto, len(groups))
|
||||
for i, group := range groups {
|
||||
var groupDto dto.UserGroupDtoWithUserCount
|
||||
var groupDto dto.UserGroupMinimalDto
|
||||
if err := dto.MapStruct(group, &groupDto); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
groupDto.UserCount, err = ugc.UserGroupService.GetUserCountOfGroup(ctx, group.ID)
|
||||
groupDto.UserCount, err = ugc.UserGroupService.GetUserCountOfGroup(c.Request.Context(), group.ID)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -78,7 +73,7 @@ func (ugc *UserGroupController) list(c *gin.Context) {
|
||||
groupsDto[i] = groupDto
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, dto.Paginated[dto.UserGroupDtoWithUserCount]{
|
||||
c.JSON(http.StatusOK, dto.Paginated[dto.UserGroupMinimalDto]{
|
||||
Data: groupsDto,
|
||||
Pagination: pagination,
|
||||
})
|
||||
@@ -91,7 +86,7 @@ func (ugc *UserGroupController) list(c *gin.Context) {
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param id path string true "User Group ID"
|
||||
// @Success 200 {object} dto.UserGroupDtoWithUsers
|
||||
// @Success 200 {object} dto.UserGroupDto
|
||||
// @Router /api/user-groups/{id} [get]
|
||||
func (ugc *UserGroupController) get(c *gin.Context) {
|
||||
group, err := ugc.UserGroupService.Get(c.Request.Context(), c.Param("id"))
|
||||
@@ -100,7 +95,7 @@ func (ugc *UserGroupController) get(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
var groupDto dto.UserGroupDtoWithUsers
|
||||
var groupDto dto.UserGroupDto
|
||||
if err := dto.MapStruct(group, &groupDto); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -116,7 +111,7 @@ func (ugc *UserGroupController) get(c *gin.Context) {
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param userGroup body dto.UserGroupCreateDto true "User group information"
|
||||
// @Success 201 {object} dto.UserGroupDtoWithUsers "Created user group"
|
||||
// @Success 201 {object} dto.UserGroupDto "Created user group"
|
||||
// @Router /api/user-groups [post]
|
||||
func (ugc *UserGroupController) create(c *gin.Context) {
|
||||
var input dto.UserGroupCreateDto
|
||||
@@ -131,7 +126,7 @@ func (ugc *UserGroupController) create(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
var groupDto dto.UserGroupDtoWithUsers
|
||||
var groupDto dto.UserGroupDto
|
||||
if err := dto.MapStruct(group, &groupDto); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -148,7 +143,7 @@ func (ugc *UserGroupController) create(c *gin.Context) {
|
||||
// @Produce json
|
||||
// @Param id path string true "User Group ID"
|
||||
// @Param userGroup body dto.UserGroupCreateDto true "User group information"
|
||||
// @Success 200 {object} dto.UserGroupDtoWithUsers "Updated user group"
|
||||
// @Success 200 {object} dto.UserGroupDto "Updated user group"
|
||||
// @Router /api/user-groups/{id} [put]
|
||||
func (ugc *UserGroupController) update(c *gin.Context) {
|
||||
var input dto.UserGroupCreateDto
|
||||
@@ -163,7 +158,7 @@ func (ugc *UserGroupController) update(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
var groupDto dto.UserGroupDtoWithUsers
|
||||
var groupDto dto.UserGroupDto
|
||||
if err := dto.MapStruct(group, &groupDto); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -198,7 +193,7 @@ func (ugc *UserGroupController) delete(c *gin.Context) {
|
||||
// @Produce json
|
||||
// @Param id path string true "User Group ID"
|
||||
// @Param users body dto.UserGroupUpdateUsersDto true "List of user IDs to assign to this group"
|
||||
// @Success 200 {object} dto.UserGroupDtoWithUsers
|
||||
// @Success 200 {object} dto.UserGroupDto
|
||||
// @Router /api/user-groups/{id}/users [put]
|
||||
func (ugc *UserGroupController) updateUsers(c *gin.Context) {
|
||||
var input dto.UserGroupUpdateUsersDto
|
||||
@@ -213,7 +208,7 @@ func (ugc *UserGroupController) updateUsers(c *gin.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
var groupDto dto.UserGroupDtoWithUsers
|
||||
var groupDto dto.UserGroupDto
|
||||
if err := dto.MapStruct(group, &groupDto); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -221,3 +216,35 @@ func (ugc *UserGroupController) updateUsers(c *gin.Context) {
|
||||
|
||||
c.JSON(http.StatusOK, groupDto)
|
||||
}
|
||||
|
||||
// updateAllowedOidcClients godoc
|
||||
// @Summary Update allowed OIDC clients
|
||||
// @Description Update the OIDC clients allowed for a specific user group
|
||||
// @Tags OIDC
|
||||
// @Accept json
|
||||
// @Produce json
|
||||
// @Param id path string true "User Group ID"
|
||||
// @Param groups body dto.UserGroupUpdateAllowedOidcClientsDto true "OIDC client IDs to allow"
|
||||
// @Success 200 {object} dto.UserGroupDto "Updated user group"
|
||||
// @Router /api/user-groups/{id}/allowed-oidc-clients [put]
|
||||
func (ugc *UserGroupController) updateAllowedOidcClients(c *gin.Context) {
|
||||
var input dto.UserGroupUpdateAllowedOidcClientsDto
|
||||
if err := c.ShouldBindJSON(&input); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
userGroup, err := ugc.UserGroupService.UpdateAllowedOidcClient(c.Request.Context(), c.Param("id"), input)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
var userGroupDto dto.UserGroupDto
|
||||
if err := dto.MapStruct(userGroup, &userGroupDto); err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, userGroupDto)
|
||||
}
|
||||
|
||||
@@ -57,7 +57,7 @@ func (wc *WebauthnController) verifyRegistrationHandler(c *gin.Context) {
|
||||
}
|
||||
|
||||
userID := c.GetString("userID")
|
||||
credential, err := wc.webAuthnService.VerifyRegistration(c.Request.Context(), sessionID, userID, c.Request)
|
||||
credential, err := wc.webAuthnService.VerifyRegistration(c.Request.Context(), sessionID, userID, c.Request, c.ClientIP())
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
@@ -134,8 +134,10 @@ func (wc *WebauthnController) listCredentialsHandler(c *gin.Context) {
|
||||
func (wc *WebauthnController) deleteCredentialHandler(c *gin.Context) {
|
||||
userID := c.GetString("userID")
|
||||
credentialID := c.Param("id")
|
||||
clientIP := c.ClientIP()
|
||||
userAgent := c.Request.UserAgent()
|
||||
|
||||
err := wc.webAuthnService.DeleteCredential(c.Request.Context(), userID, credentialID)
|
||||
err := wc.webAuthnService.DeleteCredential(c.Request.Context(), userID, credentialID, clientIP, userAgent)
|
||||
if err != nil {
|
||||
_ = c.Error(err)
|
||||
return
|
||||
|
||||
@@ -47,7 +47,7 @@ type AppConfigUpdateDto struct {
|
||||
LdapAttributeGroupMember string `json:"ldapAttributeGroupMember"`
|
||||
LdapAttributeGroupUniqueIdentifier string `json:"ldapAttributeGroupUniqueIdentifier"`
|
||||
LdapAttributeGroupName string `json:"ldapAttributeGroupName"`
|
||||
LdapAttributeAdminGroup string `json:"ldapAttributeAdminGroup"`
|
||||
LdapAdminGroupName string `json:"ldapAdminGroupName"`
|
||||
LdapSoftDeleteUsers string `json:"ldapSoftDeleteUsers"`
|
||||
EmailOneTimeAccessAsAdminEnabled string `json:"emailOneTimeAccessAsAdminEnabled" binding:"required"`
|
||||
EmailOneTimeAccessAsUnauthenticatedEnabled string `json:"emailOneTimeAccessAsUnauthenticatedEnabled" binding:"required"`
|
||||
|
||||
@@ -17,10 +17,3 @@ type AuditLogDto struct {
|
||||
Username string `json:"username"`
|
||||
Data map[string]string `json:"data"`
|
||||
}
|
||||
|
||||
type AuditLogFilterDto struct {
|
||||
UserID string `form:"filters[userId]"`
|
||||
Event string `form:"filters[event]"`
|
||||
ClientName string `form:"filters[clientName]"`
|
||||
Location string `form:"filters[location]"`
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ type OidcClientMetaDataDto struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
HasLogo bool `json:"hasLogo"`
|
||||
HasDarkLogo bool `json:"hasDarkLogo"`
|
||||
LaunchURL *string `json:"launchURL"`
|
||||
RequiresReauthentication bool `json:"requiresReauthentication"`
|
||||
}
|
||||
@@ -17,11 +18,12 @@ type OidcClientDto struct {
|
||||
IsPublic bool `json:"isPublic"`
|
||||
PkceEnabled bool `json:"pkceEnabled"`
|
||||
Credentials OidcClientCredentialsDto `json:"credentials"`
|
||||
IsGroupRestricted bool `json:"isGroupRestricted"`
|
||||
}
|
||||
|
||||
type OidcClientWithAllowedUserGroupsDto struct {
|
||||
OidcClientDto
|
||||
AllowedUserGroups []UserGroupDtoWithUserCount `json:"allowedUserGroups"`
|
||||
AllowedUserGroups []UserGroupMinimalDto `json:"allowedUserGroups"`
|
||||
}
|
||||
|
||||
type OidcClientWithAllowedGroupsCountDto struct {
|
||||
@@ -39,7 +41,10 @@ type OidcClientUpdateDto struct {
|
||||
Credentials OidcClientCredentialsDto `json:"credentials"`
|
||||
LaunchURL *string `json:"launchURL" binding:"omitempty,url"`
|
||||
HasLogo bool `json:"hasLogo"`
|
||||
HasDarkLogo bool `json:"hasDarkLogo"`
|
||||
LogoURL *string `json:"logoUrl"`
|
||||
DarkLogoURL *string `json:"darkLogoUrl"`
|
||||
IsGroupRestricted bool `json:"isGroupRestricted"`
|
||||
}
|
||||
|
||||
type OidcClientCreateDto struct {
|
||||
@@ -134,6 +139,7 @@ type OidcDeviceAuthorizationRequestDto struct {
|
||||
ClientSecret string `form:"client_secret"`
|
||||
ClientAssertion string `form:"client_assertion"`
|
||||
ClientAssertionType string `form:"client_assertion_type"`
|
||||
Nonce string `form:"nonce"`
|
||||
}
|
||||
|
||||
type OidcDeviceAuthorizationResponseDto struct {
|
||||
|
||||
backend/internal/dto/scim_dto.go (new file, 96 lines)
@@ -0,0 +1,96 @@
|
||||
package dto

import (
    "time"

    datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
)

type ScimServiceProviderDTO struct {
    ID string `json:"id"`
    Endpoint string `json:"endpoint"`
    Token string `json:"token"`
    LastSyncedAt *datatype.DateTime `json:"lastSyncedAt"`
    OidcClient OidcClientMetaDataDto `json:"oidcClient"`
    CreatedAt datatype.DateTime `json:"createdAt"`
}

type ScimServiceProviderCreateDTO struct {
    Endpoint string `json:"endpoint" binding:"required,url"`
    Token string `json:"token"`
    OidcClientID string `json:"oidcClientId" binding:"required"`
}

type ScimUser struct {
    ScimResourceData
    UserName string `json:"userName"`
    Name *ScimName `json:"name,omitempty"`
    Display string `json:"displayName,omitempty"`
    Active bool `json:"active"`
    Emails []ScimEmail `json:"emails,omitempty"`
}

type ScimName struct {
    GivenName string `json:"givenName,omitempty"`
    FamilyName string `json:"familyName,omitempty"`
}

type ScimEmail struct {
    Value string `json:"value"`
    Primary bool `json:"primary,omitempty"`
}

type ScimGroup struct {
    ScimResourceData
    Display string `json:"displayName"`
    Members []ScimGroupMember `json:"members,omitempty"`
}

type ScimGroupMember struct {
    Value string `json:"value"`
}

type ScimListResponse[T any] struct {
    Resources []T `json:"Resources"`
    TotalResults int `json:"totalResults"`
    StartIndex int `json:"startIndex"`
    ItemsPerPage int `json:"itemsPerPage"`
}

type ScimResourceData struct {
    ID string `json:"id,omitempty"`
    ExternalID string `json:"externalId,omitempty"`
    Schemas []string `json:"schemas"`
    Meta ScimResourceMeta `json:"meta,omitempty"`
}

type ScimResourceMeta struct {
    Location string `json:"location,omitempty"`
    ResourceType string `json:"resourceType,omitempty"`
    Created time.Time `json:"created,omitempty"`
    LastModified time.Time `json:"lastModified,omitempty"`
    Version string `json:"version,omitempty"`
}

func (r ScimResourceData) GetID() string {
    return r.ID
}

func (r ScimResourceData) GetExternalID() string {
    return r.ExternalID
}

func (r ScimResourceData) GetSchemas() []string {
    return r.Schemas
}

func (r ScimResourceData) GetMeta() ScimResourceMeta {
    return r.Meta
}

type ScimResource interface {
    GetID() string
    GetExternalID() string
    GetSchemas() []string
    GetMeta() ScimResourceMeta
}
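ScimListResponse is generic over the resource type, so the same envelope carries users and groups. Below is a minimal decoding sketch; the JSON payload and the reduced struct definitions are hand-written examples, not output from a real SCIM server or from this repository.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Reduced copies of the DTOs above, just enough to decode the envelope.
    type scimUser struct {
        ID       string `json:"id"`
        UserName string `json:"userName"`
        Active   bool   `json:"active"`
    }

    type scimListResponse[T any] struct {
        Resources    []T `json:"Resources"`
        TotalResults int `json:"totalResults"`
        StartIndex   int `json:"startIndex"`
        ItemsPerPage int `json:"itemsPerPage"`
    }

    func main() {
        payload := []byte(`{"totalResults":1,"startIndex":1,"itemsPerPage":1,
            "Resources":[{"id":"42","userName":"jdoe","active":true}]}`)

        var res scimListResponse[scimUser]
        if err := json.Unmarshal(payload, &res); err != nil {
            panic(err)
        }
        fmt.Println(res.TotalResults, res.Resources[0].UserName) // 1 jdoe
    }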
@@ -6,15 +6,17 @@ import (
)

type SignupTokenCreateDto struct {
    TTL utils.JSONDuration `json:"ttl" binding:"required,ttl"`
    UsageLimit int `json:"usageLimit" binding:"required,min=1,max=100"`
    TTL utils.JSONDuration `json:"ttl" binding:"required,ttl"`
    UsageLimit int `json:"usageLimit" binding:"required,min=1,max=100"`
    UserGroupIDs []string `json:"userGroupIds"`
}

type SignupTokenDto struct {
    ID string `json:"id"`
    Token string `json:"token"`
    ExpiresAt datatype.DateTime `json:"expiresAt"`
    UsageLimit int `json:"usageLimit"`
    UsageCount int `json:"usageCount"`
    CreatedAt datatype.DateTime `json:"createdAt"`
    ID string `json:"id"`
    Token string `json:"token"`
    ExpiresAt datatype.DateTime `json:"expiresAt"`
    UsageLimit int `json:"usageLimit"`
    UsageCount int `json:"usageCount"`
    UserGroups []UserGroupMinimalDto `json:"userGroups"`
    CreatedAt datatype.DateTime `json:"createdAt"`
}

@@ -8,30 +8,31 @@ import (
)

type UserDto struct {
    ID string `json:"id"`
    Username string `json:"username"`
    Email *string `json:"email" `
    FirstName string `json:"firstName"`
    LastName *string `json:"lastName"`
    DisplayName string `json:"displayName"`
    IsAdmin bool `json:"isAdmin"`
    Locale *string `json:"locale"`
    CustomClaims []CustomClaimDto `json:"customClaims"`
    UserGroups []UserGroupDto `json:"userGroups"`
    LdapID *string `json:"ldapId"`
    Disabled bool `json:"disabled"`
    ID string `json:"id"`
    Username string `json:"username"`
    Email *string `json:"email" `
    FirstName string `json:"firstName"`
    LastName *string `json:"lastName"`
    DisplayName string `json:"displayName"`
    IsAdmin bool `json:"isAdmin"`
    Locale *string `json:"locale"`
    CustomClaims []CustomClaimDto `json:"customClaims"`
    UserGroups []UserGroupMinimalDto `json:"userGroups"`
    LdapID *string `json:"ldapId"`
    Disabled bool `json:"disabled"`
}

type UserCreateDto struct {
    Username string `json:"username" binding:"required,username,min=2,max=50" unorm:"nfc"`
    Email *string `json:"email" binding:"omitempty,email" unorm:"nfc"`
    FirstName string `json:"firstName" binding:"required,min=1,max=50" unorm:"nfc"`
    LastName string `json:"lastName" binding:"max=50" unorm:"nfc"`
    DisplayName string `json:"displayName" binding:"required,min=1,max=100" unorm:"nfc"`
    IsAdmin bool `json:"isAdmin"`
    Locale *string `json:"locale"`
    Disabled bool `json:"disabled"`
    LdapID string `json:"-"`
    Username string `json:"username" binding:"required,username,min=2,max=50" unorm:"nfc"`
    Email *string `json:"email" binding:"omitempty,email" unorm:"nfc"`
    FirstName string `json:"firstName" binding:"required,min=1,max=50" unorm:"nfc"`
    LastName string `json:"lastName" binding:"max=50" unorm:"nfc"`
    DisplayName string `json:"displayName" binding:"required,min=1,max=100" unorm:"nfc"`
    IsAdmin bool `json:"isAdmin"`
    Locale *string `json:"locale"`
    Disabled bool `json:"disabled"`
    UserGroupIds []string `json:"userGroupIds"`
    LdapID string `json:"-"`
}

func (u UserCreateDto) Validate() error {

@@ -8,25 +8,17 @@ import (
)

type UserGroupDto struct {
    ID string `json:"id"`
    FriendlyName string `json:"friendlyName"`
    Name string `json:"name"`
    CustomClaims []CustomClaimDto `json:"customClaims"`
    LdapID *string `json:"ldapId"`
    CreatedAt datatype.DateTime `json:"createdAt"`
    ID string `json:"id"`
    FriendlyName string `json:"friendlyName"`
    Name string `json:"name"`
    CustomClaims []CustomClaimDto `json:"customClaims"`
    LdapID *string `json:"ldapId"`
    CreatedAt datatype.DateTime `json:"createdAt"`
    Users []UserDto `json:"users"`
    AllowedOidcClients []OidcClientMetaDataDto `json:"allowedOidcClients"`
}

type UserGroupDtoWithUsers struct {
    ID string `json:"id"`
    FriendlyName string `json:"friendlyName"`
    Name string `json:"name"`
    CustomClaims []CustomClaimDto `json:"customClaims"`
    Users []UserDto `json:"users"`
    LdapID *string `json:"ldapId"`
    CreatedAt datatype.DateTime `json:"createdAt"`
}

type UserGroupDtoWithUserCount struct {
type UserGroupMinimalDto struct {
    ID string `json:"id"`
    FriendlyName string `json:"friendlyName"`
    Name string `json:"name"`
@@ -36,6 +28,10 @@ type UserGroupDtoWithUserCount struct {
    CreatedAt datatype.DateTime `json:"createdAt"`
}

type UserGroupUpdateAllowedOidcClientsDto struct {
    OidcClientIDs []string `json:"oidcClientIds" binding:"required"`
}

type UserGroupCreateDto struct {
    FriendlyName string `json:"friendlyName" binding:"required,min=2,max=50" unorm:"nfc"`
    Name string `json:"name" binding:"required,min=2,max=255" unorm:"nfc"`

@@ -67,14 +67,12 @@ func ValidateClientID(clientID string) bool {

// ValidateCallbackURL validates callback URLs with support for wildcards
func ValidateCallbackURL(raw string) bool {
    if raw == "*" {
    // Don't validate if it contains a wildcard
    if strings.Contains(raw, "*") {
        return true
    }

    // Replace all '*' with 'x' to check if the rest is still a valid URI
    test := strings.ReplaceAll(raw, "*", "x")

    u, err := url.Parse(test)
    u, err := url.Parse(raw)
    if err != nil {
        return false
    }

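To make the behaviour change concrete: previously only a literal "*" short-circuited and wildcard URLs were checked after substituting 'x'; now any value containing '*' is accepted without parsing. The standalone restatement below inlines the same logic rather than calling the repository's code, and its final check is only a stand-in for the remaining validation that this hunk does not show.

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    // validateCallback restates the new logic shown above: any URL containing a
    // wildcard is accepted without parsing, everything else goes through url.Parse.
    func validateCallback(raw string) bool {
        if strings.Contains(raw, "*") {
            return true
        }
        u, err := url.Parse(raw)
        if err != nil {
            return false
        }
        // Stand-in for the remaining checks, which this hunk does not show.
        return u.Scheme != "" && u.Host != ""
    }

    func main() {
        fmt.Println(validateCallback("https://*.example.com/cb"))   // true, never parsed
        fmt.Println(validateCallback("https://app.example.com/cb")) // true
        fmt.Println(validateCallback("not a url"))                  // false
    }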
@@ -19,7 +19,7 @@ const heartbeatUrl = "https://analytics.pocket-id.org/heartbeat"

func (s *Scheduler) RegisterAnalyticsJob(ctx context.Context, appConfig *service.AppConfigService, httpClient *http.Client) error {
    // Skip if analytics are disabled or not in production environment
    if common.EnvConfig.AnalyticsDisabled || common.EnvConfig.AppEnv != "production" {
    if common.EnvConfig.AnalyticsDisabled || !common.EnvConfig.AppEnv.IsProduction() {
        return nil
    }

@@ -28,7 +28,7 @@ func (s *Scheduler) RegisterAnalyticsJob(ctx context.Context, appConfig *service
        appConfig: appConfig,
        httpClient: httpClient,
    }
    return s.registerJob(ctx, "SendHeartbeat", gocron.DurationJob(24*time.Hour), jobs.sendHeartbeat, true)
    return s.RegisterJob(ctx, "SendHeartbeat", gocron.DurationJob(24*time.Hour), jobs.sendHeartbeat, true)
}

type AnalyticsJob struct {
@@ -39,7 +39,7 @@ type AnalyticsJob struct {
// sendHeartbeat sends a heartbeat to the analytics service
func (j *AnalyticsJob) sendHeartbeat(parentCtx context.Context) error {
    // Skip if analytics are disabled or not in production environment
    if common.EnvConfig.AnalyticsDisabled || common.EnvConfig.AppEnv != "production" {
    if common.EnvConfig.AnalyticsDisabled || !common.EnvConfig.AppEnv.IsProduction() {
        return nil
    }

@@ -22,7 +22,7 @@ func (s *Scheduler) RegisterApiKeyExpiryJob(ctx context.Context, apiKeyService *
    }

    // Send every day at midnight
    return s.registerJob(ctx, "ExpiredApiKeyEmailJob", gocron.CronJob("0 0 * * *", false), jobs.checkAndNotifyExpiringApiKeys, false)
    return s.RegisterJob(ctx, "ExpiredApiKeyEmailJob", gocron.CronJob("0 0 * * *", false), jobs.checkAndNotifyExpiringApiKeys, false)
}

func (j *ApiKeyEmailJobs) checkAndNotifyExpiringApiKeys(ctx context.Context) error {

@@ -10,6 +10,7 @@ import (
    "github.com/go-co-op/gocron/v2"
    "gorm.io/gorm"

    "github.com/pocket-id/pocket-id/backend/internal/common"
    "github.com/pocket-id/pocket-id/backend/internal/model"
    datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
)
@@ -20,13 +21,13 @@ func (s *Scheduler) RegisterDbCleanupJobs(ctx context.Context, db *gorm.DB) erro
    // Run every 24 hours (but with some jitter so they don't run at the exact same time), and now
    def := gocron.DurationRandomJob(24*time.Hour-2*time.Minute, 24*time.Hour+2*time.Minute)
    return errors.Join(
        s.registerJob(ctx, "ClearWebauthnSessions", def, jobs.clearWebauthnSessions, true),
        s.registerJob(ctx, "ClearOneTimeAccessTokens", def, jobs.clearOneTimeAccessTokens, true),
        s.registerJob(ctx, "ClearSignupTokens", def, jobs.clearSignupTokens, true),
        s.registerJob(ctx, "ClearOidcAuthorizationCodes", def, jobs.clearOidcAuthorizationCodes, true),
        s.registerJob(ctx, "ClearOidcRefreshTokens", def, jobs.clearOidcRefreshTokens, true),
        s.registerJob(ctx, "ClearReauthenticationTokens", def, jobs.clearReauthenticationTokens, true),
        s.registerJob(ctx, "ClearAuditLogs", def, jobs.clearAuditLogs, true),
        s.RegisterJob(ctx, "ClearWebauthnSessions", def, jobs.clearWebauthnSessions, true),
        s.RegisterJob(ctx, "ClearOneTimeAccessTokens", def, jobs.clearOneTimeAccessTokens, true),
        s.RegisterJob(ctx, "ClearSignupTokens", def, jobs.clearSignupTokens, true),
        s.RegisterJob(ctx, "ClearOidcAuthorizationCodes", def, jobs.clearOidcAuthorizationCodes, true),
        s.RegisterJob(ctx, "ClearOidcRefreshTokens", def, jobs.clearOidcRefreshTokens, true),
        s.RegisterJob(ctx, "ClearReauthenticationTokens", def, jobs.clearReauthenticationTokens, true),
        s.RegisterJob(ctx, "ClearAuditLogs", def, jobs.clearAuditLogs, true),
    )
}

@@ -119,11 +120,13 @@ func (j *DbCleanupJobs) clearReauthenticationTokens(ctx context.Context) error {
    return nil
}

// ClearAuditLogs deletes audit logs older than 90 days
// ClearAuditLogs deletes audit logs older than the configured retention window
func (j *DbCleanupJobs) clearAuditLogs(ctx context.Context) error {
    cutoff := time.Now().AddDate(0, 0, -common.EnvConfig.AuditLogRetentionDays)

    st := j.db.
        WithContext(ctx).
        Delete(&model.AuditLog{}, "created_at < ?", datatype.DateTime(time.Now().AddDate(0, 0, -90)))
        Delete(&model.AuditLog{}, "created_at < ?", datatype.DateTime(cutoff))
    if st.Error != nil {
        return fmt.Errorf("failed to delete old audit logs: %w", st.Error)
    }

@@ -2,29 +2,36 @@ package job

import (
    "context"
    "errors"
    "fmt"
    "log/slog"
    "os"
    "path/filepath"
    "path"
    "strings"
    "time"

    "github.com/go-co-op/gocron/v2"
    "gorm.io/gorm"

    "github.com/pocket-id/pocket-id/backend/internal/common"
    "github.com/pocket-id/pocket-id/backend/internal/model"
    "github.com/pocket-id/pocket-id/backend/internal/storage"
)

func (s *Scheduler) RegisterFileCleanupJobs(ctx context.Context, db *gorm.DB) error {
    jobs := &FileCleanupJobs{db: db}
func (s *Scheduler) RegisterFileCleanupJobs(ctx context.Context, db *gorm.DB, fileStorage storage.FileStorage) error {
    jobs := &FileCleanupJobs{db: db, fileStorage: fileStorage}

    // Run every 24 hours
    return s.registerJob(ctx, "ClearUnusedDefaultProfilePictures", gocron.DurationJob(24*time.Hour), jobs.clearUnusedDefaultProfilePictures, false)
    err := s.RegisterJob(ctx, "ClearUnusedDefaultProfilePictures", gocron.DurationJob(24*time.Hour), jobs.clearUnusedDefaultProfilePictures, false)

    // Only necessary for file system storage
    if fileStorage.Type() == storage.TypeFileSystem {
        err = errors.Join(err, s.RegisterJob(ctx, "ClearOrphanedTempFiles", gocron.DurationJob(12*time.Hour), jobs.clearOrphanedTempFiles, true))
    }

    return err
}

type FileCleanupJobs struct {
    db *gorm.DB
    db *gorm.DB
    fileStorage storage.FileStorage
}

// ClearUnusedDefaultProfilePictures deletes default profile pictures that don't match any user's initials
@@ -44,29 +51,24 @@ func (j *FileCleanupJobs) clearUnusedDefaultProfilePictures(ctx context.Context)
        initialsInUse[user.Initials()] = struct{}{}
    }

    defaultPicturesDir := common.EnvConfig.UploadPath + "/profile-pictures/defaults"
    if _, err := os.Stat(defaultPicturesDir); os.IsNotExist(err) {
        return nil
    }

    files, err := os.ReadDir(defaultPicturesDir)
    defaultPicturesDir := path.Join("profile-pictures", "defaults")
    files, err := j.fileStorage.List(ctx, defaultPicturesDir)
    if err != nil {
        return fmt.Errorf("failed to read default profile pictures directory: %w", err)
        return fmt.Errorf("failed to list default profile pictures: %w", err)
    }

    filesDeleted := 0
    for _, file := range files {
        if file.IsDir() {
            continue // Skip directories
        _, filename := path.Split(file.Path)
        if filename == "" {
            continue
        }

        filename := file.Name()
        initials := strings.TrimSuffix(filename, ".png")

        // If these initials aren't used by any user, delete the file
        if _, ok := initialsInUse[initials]; !ok {
            filePath := filepath.Join(defaultPicturesDir, filename)
            if err := os.Remove(filePath); err != nil {
            filePath := path.Join(defaultPicturesDir, filename)
            if err := j.fileStorage.Delete(ctx, filePath); err != nil {
                slog.ErrorContext(ctx, "Failed to delete unused default profile picture", slog.String("path", filePath), slog.Any("error", err))
            } else {
                filesDeleted++
@@ -77,3 +79,34 @@ func (j *FileCleanupJobs) clearUnusedDefaultProfilePictures(ctx context.Context)
    slog.Info("Done deleting unused default profile pictures", slog.Int("count", filesDeleted))
    return nil
}

// clearOrphanedTempFiles deletes temporary files that are produced by failed atomic writes
func (j *FileCleanupJobs) clearOrphanedTempFiles(ctx context.Context) error {
    const minAge = 10 * time.Minute

    var deleted int
    err := j.fileStorage.Walk(ctx, "/", func(p storage.ObjectInfo) error {
        // Only temp files
        if !strings.HasSuffix(p.Path, "-tmp") {
            return nil
        }

        if time.Since(p.ModTime) < minAge {
            return nil
        }

        if err := j.fileStorage.Delete(ctx, p.Path); err != nil {
            slog.ErrorContext(ctx, "Failed to delete temp file", slog.String("path", p.Path), slog.Any("error", err))
            return nil
        }
        deleted++
        return nil
    })

    if err != nil {
        return fmt.Errorf("failed to scan storage: %w", err)
    }

    slog.Info("Done cleaning orphaned temp files", slog.Int("count", deleted))
    return nil
}
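These jobs only depend on a narrow surface of the new storage abstraction. The interface itself is not part of this changeset; the sketch below is an inferred shape based purely on the calls used here (Type, List, Walk, Delete) and in the services further down (Open, Save), so all names and signatures are assumptions.

    // Package sketch: inferred shape, not the real backend/internal/storage package.
    package sketch

    import (
        "context"
        "io"
        "time"
    )

    // ObjectInfo carries the fields the cleanup jobs read (Path, ModTime).
    type ObjectInfo struct {
        Path    string
        Size    int64
        ModTime time.Time
    }

    // FileStorage mirrors the calls made in this changeset; the real
    // definitions may differ.
    type FileStorage interface {
        Type() string // e.g. storage.TypeFileSystem vs. a database-backed store
        Open(ctx context.Context, path string) (io.ReadCloser, int64, error)
        Save(ctx context.Context, path string, r io.Reader) error
        Delete(ctx context.Context, path string) error
        List(ctx context.Context, dir string) ([]ObjectInfo, error)
        Walk(ctx context.Context, root string, fn func(ObjectInfo) error) error
    }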
@@ -23,7 +23,7 @@ func (s *Scheduler) RegisterGeoLiteUpdateJobs(ctx context.Context, geoLiteServic
    jobs := &GeoLiteUpdateJobs{geoLiteService: geoLiteService}

    // Run every 24 hours (and right away)
    return s.registerJob(ctx, "UpdateGeoLiteDB", gocron.DurationJob(24*time.Hour), jobs.updateGoeLiteDB, true)
    return s.RegisterJob(ctx, "UpdateGeoLiteDB", gocron.DurationJob(24*time.Hour), jobs.updateGoeLiteDB, true)
}

func (j *GeoLiteUpdateJobs) updateGoeLiteDB(ctx context.Context) error {

@@ -18,7 +18,7 @@ func (s *Scheduler) RegisterLdapJobs(ctx context.Context, ldapService *service.L
    jobs := &LdapJobs{ldapService: ldapService, appConfigService: appConfigService}

    // Register the job to run every hour
    return s.registerJob(ctx, "SyncLdap", gocron.DurationJob(time.Hour), jobs.syncLdap, true)
    return s.RegisterJob(ctx, "SyncLdap", gocron.DurationJob(time.Hour), jobs.syncLdap, true)
}

func (j *LdapJobs) syncLdap(ctx context.Context) error {

@@ -2,6 +2,7 @@ package job

import (
    "context"
    "errors"
    "fmt"
    "log/slog"

@@ -24,6 +25,26 @@ func NewScheduler() (*Scheduler, error) {
    }, nil
}

func (s *Scheduler) RemoveJob(name string) error {
    jobs := s.scheduler.Jobs()

    var errs []error
    for _, job := range jobs {
        if job.Name() == name {
            err := s.scheduler.RemoveJob(job.ID())
            if err != nil {
                errs = append(errs, fmt.Errorf("failed to unqueue job %q with ID %q: %w", name, job.ID().String(), err))
            }
        }
    }

    if len(errs) > 0 {
        return errors.Join(errs...)
    }

    return nil
}

// Run the scheduler.
// This function blocks until the context is canceled.
func (s *Scheduler) Run(ctx context.Context) error {
@@ -43,9 +64,10 @@ func (s *Scheduler) Run(ctx context.Context) error {
    return nil
}

func (s *Scheduler) registerJob(ctx context.Context, name string, def gocron.JobDefinition, job func(ctx context.Context) error, runImmediately bool) error {
func (s *Scheduler) RegisterJob(ctx context.Context, name string, def gocron.JobDefinition, job func(ctx context.Context) error, runImmediately bool, extraOptions ...gocron.JobOption) error {
    jobOptions := []gocron.JobOption{
        gocron.WithContext(ctx),
        gocron.WithName(name),
        gocron.WithEventListeners(
            gocron.BeforeJobRuns(func(jobID uuid.UUID, jobName string) {
                slog.Info("Starting job",
@@ -73,6 +95,8 @@ func (s *Scheduler) registerJob(ctx context.Context, name string, def gocron.Job
        jobOptions = append(jobOptions, gocron.JobOption(gocron.WithStartImmediately()))
    }

    jobOptions = append(jobOptions, extraOptions...)

    _, err := s.scheduler.NewJob(def, gocron.NewTask(job), jobOptions...)

    if err != nil {

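registerJob is now exported as RegisterJob and gained a variadic extraOptions parameter, which is what lets other packages (such as the SCIM job below) register their own jobs. A hedged sketch of a caller passing an extra gocron option; the job name, interval, and the singleton option are illustrative and not taken from this changeset.

    package main

    import (
        "context"
        "log/slog"
        "time"

        "github.com/go-co-op/gocron/v2"

        "github.com/pocket-id/pocket-id/backend/internal/job"
    )

    func main() {
        ctx := context.Background()

        s, err := job.NewScheduler()
        if err != nil {
            panic(err)
        }

        // The extra option is appended to the default options via extraOptions.
        err = s.RegisterJob(ctx, "ExampleCleanup", gocron.DurationJob(6*time.Hour),
            func(ctx context.Context) error {
                slog.InfoContext(ctx, "running example cleanup")
                return nil
            },
            true, // run immediately on startup
            gocron.WithSingletonMode(gocron.LimitModeReschedule),
        )
        if err != nil {
            panic(err)
        }

        _ = s.Run(ctx) // blocks until ctx is canceled
    }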
25 backend/internal/job/scim_job.go Normal file
@@ -0,0 +1,25 @@
package job

import (
    "context"
    "time"

    "github.com/go-co-op/gocron/v2"

    "github.com/pocket-id/pocket-id/backend/internal/service"
)

type ScimJobs struct {
    scimService *service.ScimService
}

func (s *Scheduler) RegisterScimJobs(ctx context.Context, scimService *service.ScimService) error {
    jobs := &ScimJobs{scimService: scimService}

    // Register the job to run every hour
    return s.RegisterJob(ctx, "SyncScim", gocron.DurationJob(time.Hour), jobs.SyncScim, true)
}

func (j *ScimJobs) SyncScim(ctx context.Context) error {
    return j.scimService.SyncAll(ctx)
}
26 backend/internal/middleware/cache_control.go Normal file
@@ -0,0 +1,26 @@
package middleware

import "github.com/gin-gonic/gin"

// CacheControlMiddleware sets a safe default Cache-Control header on responses
// that do not already specify one. This prevents proxies from caching
// authenticated responses that might contain private data.
type CacheControlMiddleware struct {
    headerValue string
}

func NewCacheControlMiddleware() *CacheControlMiddleware {
    return &CacheControlMiddleware{
        headerValue: "private, no-store",
    }
}

func (m *CacheControlMiddleware) Add() gin.HandlerFunc {
    return func(c *gin.Context) {
        if c.Writer.Header().Get("Cache-Control") == "" {
            c.Header("Cache-Control", m.headerValue)
        }

        c.Next()
    }
}
45 backend/internal/middleware/cache_control_test.go Normal file
@@ -0,0 +1,45 @@
package middleware

import (
    "net/http"
    "net/http/httptest"
    "testing"

    "github.com/gin-gonic/gin"
    "github.com/stretchr/testify/require"
)

func TestCacheControlMiddlewareSetsDefault(t *testing.T) {
    gin.SetMode(gin.TestMode)
    router := gin.New()
    router.Use(NewCacheControlMiddleware().Add())

    router.GET("/test", func(c *gin.Context) {
        c.Status(http.StatusOK)
    })

    req := httptest.NewRequest(http.MethodGet, "/test", http.NoBody)
    w := httptest.NewRecorder()

    router.ServeHTTP(w, req)

    require.Equal(t, "private, no-store", w.Header().Get("Cache-Control"))
}

func TestCacheControlMiddlewarePreservesExistingHeader(t *testing.T) {
    gin.SetMode(gin.TestMode)
    router := gin.New()
    router.Use(NewCacheControlMiddleware().Add())

    router.GET("/custom", func(c *gin.Context) {
        c.Header("Cache-Control", "public, max-age=60")
        c.Status(http.StatusOK)
    })

    req := httptest.NewRequest(http.MethodGet, "/custom", http.NoBody)
    w := httptest.NewRecorder()

    router.ServeHTTP(w, req)

    require.Equal(t, "public, max-age=60", w.Header().Get("Cache-Control"))
}
40 backend/internal/middleware/head_middleware.go Normal file
@@ -0,0 +1,40 @@
package middleware

import (
    "net/http"
    "strconv"

    "github.com/gin-gonic/gin"
)

type IsHeadRequestCtxKey struct{}

type headWriter struct {
    gin.ResponseWriter
    size int
}

func (w *headWriter) Write(b []byte) (int, error) {
    w.size += len(b)
    return w.size, nil
}

func HeadMiddleware() gin.HandlerFunc {
    return func(c *gin.Context) {
        // Only process if it's a HEAD request
        if c.Request.Context().Value(IsHeadRequestCtxKey{}) != true {
            c.Next()
            return
        }

        // Replace the ResponseWriter with our headWriter to swallow the body
        hw := &headWriter{ResponseWriter: c.Writer}
        c.Writer = hw

        c.Next()

        c.Writer.Header().Set("Content-Length", strconv.Itoa(hw.size))
        c.Request.Method = http.MethodHead

    }
}
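HeadMiddleware only acts when IsHeadRequestCtxKey has been set on the request context, so some earlier layer must translate HEAD requests into GET and mark them; that layer is not part of this hunk. A hedged sketch of what such a wrapper could look like, written as if it lived next to the middleware; the wrapper itself is an assumption, not code from the repository.

    package middleware

    import (
        "context"
        "net/http"
    )

    // headToGet is a hypothetical outer handler: it marks incoming HEAD requests
    // on the context and rewrites them to GET so the normal GET handlers run,
    // while HeadMiddleware above discards the body and restores the method and
    // Content-Length afterwards.
    func headToGet(next http.Handler) http.Handler {
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            if r.Method == http.MethodHead {
                r = r.WithContext(context.WithValue(r.Context(), IsHeadRequestCtxKey{}, true))
                r.Method = http.MethodGet
            }
            next.ServeHTTP(w, r)
        })
    }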
@@ -29,7 +29,7 @@ func (m *RateLimitMiddleware) Add(limit rate.Limit, burst int) gin.HandlerFunc {

        // Skip rate limiting for localhost and test environment
        // If the client ip is localhost the request comes from the frontend
        if ip == "" || ip == "127.0.0.1" || ip == "::1" || common.EnvConfig.AppEnv == "test" {
        if ip == "" || ip == "127.0.0.1" || ip == "::1" || common.EnvConfig.AppEnv.IsTest() {
            c.Next()
            return
        }

@@ -77,7 +77,7 @@ type AppConfig struct {
    LdapAttributeGroupMember AppConfigVariable `key:"ldapAttributeGroupMember"`
    LdapAttributeGroupUniqueIdentifier AppConfigVariable `key:"ldapAttributeGroupUniqueIdentifier"`
    LdapAttributeGroupName AppConfigVariable `key:"ldapAttributeGroupName"`
    LdapAttributeAdminGroup AppConfigVariable `key:"ldapAttributeAdminGroup"`
    LdapAdminGroupName AppConfigVariable `key:"ldapAdminGroupName"`
    LdapSoftDeleteUsers AppConfigVariable `key:"ldapSoftDeleteUsers"`
}

@@ -3,13 +3,14 @@ package model
import (
    "database/sql/driver"
    "encoding/json"
    "fmt"

    "github.com/pocket-id/pocket-id/backend/internal/utils"
)

type AuditLog struct {
    Base

    Event AuditLogEvent `sortable:"true"`
    Event AuditLogEvent `sortable:"true" filterable:"true"`
    IpAddress *string `sortable:"true"`
    Country string `sortable:"true"`
    City string `sortable:"true"`
@@ -17,7 +18,7 @@ type AuditLog struct {
    Username string `gorm:"-"`
    Data AuditLogData

    UserID string
    UserID string `filterable:"true"`
    User User
}

@@ -33,6 +34,8 @@ const (
    AuditLogEventNewClientAuthorization AuditLogEvent = "NEW_CLIENT_AUTHORIZATION"
    AuditLogEventDeviceCodeAuthorization AuditLogEvent = "DEVICE_CODE_AUTHORIZATION"
    AuditLogEventNewDeviceCodeAuthorization AuditLogEvent = "NEW_DEVICE_CODE_AUTHORIZATION"
    AuditLogEventPasskeyAdded AuditLogEvent = "PASSKEY_ADDED"
    AuditLogEventPasskeyRemoved AuditLogEvent = "PASSKEY_REMOVED"
)

// Scan and Value methods for GORM to handle the custom type
@@ -47,14 +50,7 @@ func (e AuditLogEvent) Value() (driver.Value, error) {
}

func (d *AuditLogData) Scan(value any) error {
    switch v := value.(type) {
    case []byte:
        return json.Unmarshal(v, d)
    case string:
        return json.Unmarshal([]byte(v), d)
    default:
        return fmt.Errorf("unsupported type: %T", value)
    }
    return utils.UnmarshalJSONFromDatabase(d, value)
}

func (d AuditLogData) Value() (driver.Value, error) {

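Several Scan implementations in this changeset collapse the same []byte/string switch into utils.UnmarshalJSONFromDatabase. The helper's body is not shown in the diff; based on the code it replaces, it presumably looks roughly like the inference below, which may differ from the actual implementation in backend/internal/utils.

    package utils

    import (
        "encoding/json"
        "fmt"
    )

    // UnmarshalJSONFromDatabase is an inferred shape of the shared helper:
    // it decodes a JSON column that drivers may hand back as []byte or string.
    func UnmarshalJSONFromDatabase(dst any, value any) error {
        switch v := value.(type) {
        case []byte:
            return json.Unmarshal(v, dst)
        case string:
            return json.Unmarshal([]byte(v), dst)
        default:
            return fmt.Errorf("unsupported type: %T", value)
        }
    }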
@@ -3,10 +3,10 @@ package model
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/utils"
|
||||
)
|
||||
|
||||
type UserAuthorizedOidcClient struct {
|
||||
@@ -52,11 +52,13 @@ type OidcClient struct {
|
||||
CallbackURLs UrlList
|
||||
LogoutCallbackURLs UrlList
|
||||
ImageType *string
|
||||
DarkImageType *string
|
||||
IsPublic bool
|
||||
PkceEnabled bool
|
||||
RequiresReauthentication bool
|
||||
PkceEnabled bool `sortable:"true" filterable:"true"`
|
||||
RequiresReauthentication bool `sortable:"true" filterable:"true"`
|
||||
Credentials OidcClientCredentials
|
||||
LaunchURL *string
|
||||
IsGroupRestricted bool `sortable:"true" filterable:"true"`
|
||||
|
||||
AllowedUserGroups []UserGroup `gorm:"many2many:oidc_clients_allowed_user_groups;"`
|
||||
CreatedByID *string
|
||||
@@ -68,6 +70,10 @@ func (c OidcClient) HasLogo() bool {
|
||||
return c.ImageType != nil && *c.ImageType != ""
|
||||
}
|
||||
|
||||
func (c OidcClient) HasDarkLogo() bool {
|
||||
return c.DarkImageType != nil && *c.DarkImageType != ""
|
||||
}
|
||||
|
||||
type OidcRefreshToken struct {
|
||||
Base
|
||||
|
||||
@@ -116,14 +122,7 @@ func (occ OidcClientCredentials) FederatedIdentityForIssuer(issuer string) (Oidc
|
||||
}
|
||||
|
||||
func (occ *OidcClientCredentials) Scan(value any) error {
|
||||
switch v := value.(type) {
|
||||
case []byte:
|
||||
return json.Unmarshal(v, occ)
|
||||
case string:
|
||||
return json.Unmarshal([]byte(v), occ)
|
||||
default:
|
||||
return fmt.Errorf("unsupported type: %T", value)
|
||||
}
|
||||
return utils.UnmarshalJSONFromDatabase(occ, value)
|
||||
}
|
||||
|
||||
func (occ OidcClientCredentials) Value() (driver.Value, error) {
|
||||
@@ -133,14 +132,7 @@ func (occ OidcClientCredentials) Value() (driver.Value, error) {
|
||||
type UrlList []string //nolint:recvcheck
|
||||
|
||||
func (cu *UrlList) Scan(value any) error {
|
||||
switch v := value.(type) {
|
||||
case []byte:
|
||||
return json.Unmarshal(v, cu)
|
||||
case string:
|
||||
return json.Unmarshal([]byte(v), cu)
|
||||
default:
|
||||
return fmt.Errorf("unsupported type: %T", value)
|
||||
}
|
||||
return utils.UnmarshalJSONFromDatabase(cu, value)
|
||||
}
|
||||
|
||||
func (cu UrlList) Value() (driver.Value, error) {
|
||||
@@ -152,6 +144,7 @@ type OidcDeviceCode struct {
|
||||
DeviceCode string
|
||||
UserCode string
|
||||
Scope string
|
||||
Nonce string
|
||||
ExpiresAt datatype.DateTime
|
||||
IsAuthorized bool
|
||||
|
||||
|
||||
14 backend/internal/model/scim.go Normal file
@@ -0,0 +1,14 @@
package model

import datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"

type ScimServiceProvider struct {
    Base

    Endpoint string `sortable:"true"`
    Token datatype.EncryptedString
    LastSyncedAt *datatype.DateTime `sortable:"true"`

    OidcClientID string
    OidcClient OidcClient `gorm:"foreignKey:OidcClientID;references:ID;"`
}
@@ -13,6 +13,7 @@ type SignupToken struct {
    ExpiresAt datatype.DateTime `json:"expiresAt" sortable:"true"`
    UsageLimit int `json:"usageLimit" sortable:"true"`
    UsageCount int `json:"usageCount" sortable:"true"`
    UserGroups []UserGroup `gorm:"many2many:signup_tokens_user_groups;"`
}

func (st *SignupToken) IsExpired() bool {

17 backend/internal/model/storage.go Normal file
@@ -0,0 +1,17 @@
package model

import (
    datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
)

type Storage struct {
    Path string `gorm:"primaryKey"`
    Data []byte
    Size int64
    ModTime datatype.DateTime
    CreatedAt datatype.DateTime
}

func (Storage) TableName() string {
    return "storage"
}
@@ -11,6 +11,15 @@ import (
// DateTime custom type for time.Time to store date as unix timestamp for sqlite and as date for postgres
type DateTime time.Time //nolint:recvcheck

func DateTimeFromString(str string) (DateTime, error) {
    t, err := time.Parse(time.RFC3339Nano, str)
    if err != nil {
        return DateTime{}, fmt.Errorf("failed to parse date string: %w", err)
    }

    return DateTime(t), nil
}

func (date *DateTime) Scan(value any) (err error) {
    switch v := value.(type) {
    case time.Time:

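DateTimeFromString gives callers a way to build a DateTime from an RFC 3339 string. A quick usage sketch; the timestamp is an arbitrary example and the program is illustrative only.

    package main

    import (
        "fmt"
        "time"

        datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
    )

    func main() {
        // time.RFC3339Nano also accepts plain RFC 3339 timestamps without fractions.
        dt, err := datatype.DateTimeFromString("2024-05-01T12:00:00Z")
        if err != nil {
            panic(err)
        }
        fmt.Println(time.Time(dt).UTC()) // 2024-05-01 12:00:00 +0000 UTC
    }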
91 backend/internal/model/types/encrypted_string.go Normal file
@@ -0,0 +1,91 @@
package datatype

import (
    "crypto/sha256"
    "database/sql/driver"
    "encoding/base64"
    "fmt"
    "io"

    "github.com/pocket-id/pocket-id/backend/internal/common"
    cryptoutils "github.com/pocket-id/pocket-id/backend/internal/utils/crypto"
    "golang.org/x/crypto/hkdf"
)

const encryptedStringAAD = "encrypted_string"

var encStringKey []byte

// EncryptedString stores plaintext in memory and persists encrypted data in the database.
type EncryptedString string //nolint:recvcheck

func (e *EncryptedString) Scan(value any) error {
    if value == nil {
        *e = ""
        return nil
    }

    var raw string
    switch v := value.(type) {
    case string:
        raw = v
    case []byte:
        raw = string(v)
    default:
        return fmt.Errorf("unexpected type for EncryptedString: %T", value)
    }

    if raw == "" {
        *e = ""
        return nil
    }

    encBytes, err := base64.StdEncoding.DecodeString(raw)
    if err != nil {
        return fmt.Errorf("failed to decode encrypted string: %w", err)
    }

    decBytes, err := cryptoutils.Decrypt(encStringKey, encBytes, []byte(encryptedStringAAD))
    if err != nil {
        return fmt.Errorf("failed to decrypt encrypted string: %w", err)
    }

    *e = EncryptedString(decBytes)
    return nil
}

func (e EncryptedString) Value() (driver.Value, error) {
    if e == "" {
        return "", nil
    }

    encBytes, err := cryptoutils.Encrypt(encStringKey, []byte(e), []byte(encryptedStringAAD))
    if err != nil {
        return nil, fmt.Errorf("failed to encrypt string: %w", err)
    }

    return base64.StdEncoding.EncodeToString(encBytes), nil
}

func (e EncryptedString) String() string {
    return string(e)
}

func deriveEncryptedStringKey(master []byte) ([]byte, error) {
    const info = "pocketid/encrypted_string"
    r := hkdf.New(sha256.New, master, nil, []byte(info))

    key := make([]byte, 32)
    if _, err := io.ReadFull(r, key); err != nil {
        return nil, err
    }
    return key, nil
}

func init() {
    key, err := deriveEncryptedStringKey(common.EnvConfig.EncryptionKey)
    if err != nil {
        panic(fmt.Sprintf("failed to derive encrypted string key: %v", err))
    }
    encStringKey = key
}
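EncryptedString behaves like a normal string in Go code while GORM transparently encrypts it at rest, as with the ScimServiceProvider.Token field above. A minimal round-trip sketch, assuming ENCRYPTION_KEY is configured so the package init can derive the key; the Webhook model and function name are hypothetical.

    package example

    import (
        "fmt"

        "gorm.io/gorm"

        datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
    )

    // Webhook is a hypothetical model; only Token uses the encrypted type.
    type Webhook struct {
        ID    string
        Token datatype.EncryptedString
    }

    func roundTrip(db *gorm.DB) error {
        // Value() encrypts via cryptoutils.Encrypt and stores base64 text.
        if err := db.Create(&Webhook{ID: "1", Token: "super-secret"}).Error; err != nil {
            return err
        }

        // Scan() decrypts transparently, so the field reads back as plaintext.
        var w Webhook
        if err := db.First(&w, "id = ?", "1").Error; err != nil {
            return err
        }
        fmt.Println(w.Token.String()) // "super-secret"
        return nil
    }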
@@ -2,6 +2,7 @@ package model
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-webauthn/webauthn/protocol"
|
||||
"github.com/go-webauthn/webauthn/webauthn"
|
||||
@@ -18,10 +19,11 @@ type User struct {
|
||||
FirstName string `sortable:"true"`
|
||||
LastName string `sortable:"true"`
|
||||
DisplayName string `sortable:"true"`
|
||||
IsAdmin bool `sortable:"true"`
|
||||
IsAdmin bool `sortable:"true" filterable:"true"`
|
||||
Locale *string
|
||||
LdapID *string
|
||||
Disabled bool `sortable:"true"`
|
||||
Disabled bool `sortable:"true" filterable:"true"`
|
||||
UpdatedAt *datatype.DateTime
|
||||
|
||||
CustomClaims []CustomClaim
|
||||
UserGroups []UserGroup `gorm:"many2many:user_groups_users;"`
|
||||
@@ -85,10 +87,18 @@ func (u User) Initials() string {
|
||||
return strings.ToUpper(first + last)
|
||||
}
|
||||
|
||||
func (u User) LastModified() time.Time {
|
||||
if u.UpdatedAt != nil {
|
||||
return u.UpdatedAt.ToTime()
|
||||
}
|
||||
return u.CreatedAt.ToTime()
|
||||
}
|
||||
|
||||
type OneTimeAccessToken struct {
|
||||
Base
|
||||
Token string
|
||||
ExpiresAt datatype.DateTime
|
||||
Token string
|
||||
DeviceToken *string
|
||||
ExpiresAt datatype.DateTime
|
||||
|
||||
UserID string
|
||||
User User
|
||||
|
||||
@@ -1,10 +1,25 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
|
||||
)
|
||||
|
||||
type UserGroup struct {
|
||||
Base
|
||||
FriendlyName string `sortable:"true"`
|
||||
Name string `sortable:"true"`
|
||||
LdapID *string
|
||||
Users []User `gorm:"many2many:user_groups_users;"`
|
||||
CustomClaims []CustomClaim
|
||||
FriendlyName string `sortable:"true"`
|
||||
Name string `sortable:"true"`
|
||||
LdapID *string
|
||||
UpdatedAt *datatype.DateTime
|
||||
Users []User `gorm:"many2many:user_groups_users;"`
|
||||
CustomClaims []CustomClaim
|
||||
AllowedOidcClients []OidcClient `gorm:"many2many:oidc_clients_allowed_user_groups;"`
|
||||
}
|
||||
|
||||
func (ug UserGroup) LastModified() time.Time {
|
||||
if ug.UpdatedAt != nil {
|
||||
return ug.UpdatedAt.ToTime()
|
||||
}
|
||||
return ug.CreatedAt.ToTime()
|
||||
}
|
||||
|
||||
@@ -3,11 +3,11 @@ package model
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/go-webauthn/webauthn/protocol"
|
||||
datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/utils"
|
||||
)
|
||||
|
||||
type WebauthnSession struct {
|
||||
@@ -16,6 +16,7 @@ type WebauthnSession struct {
|
||||
Challenge string
|
||||
ExpiresAt datatype.DateTime
|
||||
UserVerification string
|
||||
CredentialParams CredentialParameters
|
||||
}
|
||||
|
||||
type WebauthnCredential struct {
|
||||
@@ -58,16 +59,20 @@ type AuthenticatorTransportList []protocol.AuthenticatorTransport //nolint:recvc
|
||||
|
||||
// Scan and Value methods for GORM to handle the custom type
|
||||
func (atl *AuthenticatorTransportList) Scan(value interface{}) error {
|
||||
switch v := value.(type) {
|
||||
case []byte:
|
||||
return json.Unmarshal(v, atl)
|
||||
case string:
|
||||
return json.Unmarshal([]byte(v), atl)
|
||||
default:
|
||||
return fmt.Errorf("unsupported type: %T", value)
|
||||
}
|
||||
return utils.UnmarshalJSONFromDatabase(atl, value)
|
||||
}
|
||||
|
||||
func (atl AuthenticatorTransportList) Value() (driver.Value, error) {
|
||||
return json.Marshal(atl)
|
||||
}
|
||||
|
||||
type CredentialParameters []protocol.CredentialParameter //nolint:recvcheck
|
||||
|
||||
// Scan and Value methods for GORM to handle the custom type
|
||||
func (cp *CredentialParameters) Scan(value interface{}) error {
|
||||
return utils.UnmarshalJSONFromDatabase(cp, value)
|
||||
}
|
||||
|
||||
func (cp CredentialParameters) Value() (driver.Value, error) {
|
||||
return json.Marshal(cp)
|
||||
}
|
||||
|
||||
@@ -25,14 +25,14 @@ func NewApiKeyService(db *gorm.DB, emailService *EmailService) *ApiKeyService {
|
||||
return &ApiKeyService{db: db, emailService: emailService}
|
||||
}
|
||||
|
||||
func (s *ApiKeyService) ListApiKeys(ctx context.Context, userID string, sortedPaginationRequest utils.SortedPaginationRequest) ([]model.ApiKey, utils.PaginationResponse, error) {
|
||||
func (s *ApiKeyService) ListApiKeys(ctx context.Context, userID string, listRequestOptions utils.ListRequestOptions) ([]model.ApiKey, utils.PaginationResponse, error) {
|
||||
query := s.db.
|
||||
WithContext(ctx).
|
||||
Where("user_id = ?", userID).
|
||||
Model(&model.ApiKey{})
|
||||
|
||||
var apiKeys []model.ApiKey
|
||||
pagination, err := utils.PaginateAndSort(sortedPaginationRequest, query, &apiKeys)
|
||||
pagination, err := utils.PaginateFilterAndSort(listRequestOptions, query, &apiKeys)
|
||||
if err != nil {
|
||||
return nil, utils.PaginationResponse{}, err
|
||||
}
|
||||
|
||||
@@ -102,7 +102,7 @@ func (s *AppConfigService) getDefaultDbConfig() *model.AppConfig {
|
||||
LdapAttributeGroupMember: model.AppConfigVariable{Value: "member"},
|
||||
LdapAttributeGroupUniqueIdentifier: model.AppConfigVariable{},
|
||||
LdapAttributeGroupName: model.AppConfigVariable{},
|
||||
LdapAttributeAdminGroup: model.AppConfigVariable{},
|
||||
LdapAdminGroupName: model.AppConfigVariable{},
|
||||
LdapSoftDeleteUsers: model.AppConfigVariable{Value: "true"},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,42 +1,52 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime/multipart"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/pocket-id/pocket-id/backend/internal/common"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/storage"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/utils"
|
||||
)
|
||||
|
||||
type AppImagesService struct {
|
||||
mu sync.RWMutex
|
||||
extensions map[string]string
|
||||
storage storage.FileStorage
|
||||
}
|
||||
|
||||
func NewAppImagesService(extensions map[string]string) *AppImagesService {
|
||||
return &AppImagesService{extensions: extensions}
|
||||
func NewAppImagesService(extensions map[string]string, storage storage.FileStorage) *AppImagesService {
|
||||
return &AppImagesService{extensions: extensions, storage: storage}
|
||||
}
|
||||
|
||||
func (s *AppImagesService) GetImage(name string) (string, string, error) {
|
||||
func (s *AppImagesService) GetImage(ctx context.Context, name string) (io.ReadCloser, int64, string, error) {
|
||||
ext, err := s.getExtension(name)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
return nil, 0, "", err
|
||||
}
|
||||
|
||||
mimeType := utils.GetImageMimeType(ext)
|
||||
if mimeType == "" {
|
||||
return "", "", fmt.Errorf("unsupported image type '%s'", ext)
|
||||
return nil, 0, "", fmt.Errorf("unsupported image type '%s'", ext)
|
||||
}
|
||||
|
||||
imagePath := filepath.Join(common.EnvConfig.UploadPath, "application-images", fmt.Sprintf("%s.%s", name, ext))
|
||||
return imagePath, mimeType, nil
|
||||
imagePath := path.Join("application-images", name+"."+ext)
|
||||
reader, size, err := s.storage.Open(ctx, imagePath)
|
||||
if err != nil {
|
||||
if storage.IsNotExist(err) {
|
||||
return nil, 0, "", &common.ImageNotFoundError{}
|
||||
}
|
||||
return nil, 0, "", err
|
||||
}
|
||||
return reader, size, mimeType, nil
|
||||
}
|
||||
|
||||
func (s *AppImagesService) UpdateImage(file *multipart.FileHeader, imageName string) error {
|
||||
func (s *AppImagesService) UpdateImage(ctx context.Context, file *multipart.FileHeader, imageName string) error {
|
||||
fileType := strings.ToLower(utils.GetFileExtension(file.Filename))
|
||||
mimeType := utils.GetImageMimeType(fileType)
|
||||
if mimeType == "" {
|
||||
@@ -48,18 +58,23 @@ func (s *AppImagesService) UpdateImage(file *multipart.FileHeader, imageName str
|
||||
|
||||
currentExt, ok := s.extensions[imageName]
|
||||
if !ok {
|
||||
return fmt.Errorf("unknown application image '%s'", imageName)
|
||||
s.extensions[imageName] = fileType
|
||||
}
|
||||
|
||||
imagePath := filepath.Join(common.EnvConfig.UploadPath, "application-images", fmt.Sprintf("%s.%s", imageName, fileType))
|
||||
imagePath := path.Join("application-images", imageName+"."+fileType)
|
||||
fileReader, err := file.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fileReader.Close()
|
||||
|
||||
if err := utils.SaveFile(file, imagePath); err != nil {
|
||||
if err := s.storage.Save(ctx, imagePath, fileReader); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if currentExt != "" && currentExt != fileType {
|
||||
oldImagePath := filepath.Join(common.EnvConfig.UploadPath, "application-images", fmt.Sprintf("%s.%s", imageName, currentExt))
|
||||
if err := os.Remove(oldImagePath); err != nil && !os.IsNotExist(err) {
|
||||
oldImagePath := path.Join("application-images", imageName+"."+currentExt)
|
||||
if err := s.storage.Delete(ctx, oldImagePath); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -69,13 +84,39 @@ func (s *AppImagesService) UpdateImage(file *multipart.FileHeader, imageName str
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *AppImagesService) DeleteImage(ctx context.Context, imageName string) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
ext, ok := s.extensions[imageName]
|
||||
if !ok || ext == "" {
|
||||
return &common.ImageNotFoundError{}
|
||||
}
|
||||
|
||||
imagePath := path.Join("application-images", imageName+"."+ext)
|
||||
if err := s.storage.Delete(ctx, imagePath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
delete(s.extensions, imageName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *AppImagesService) IsDefaultProfilePictureSet() bool {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
_, ok := s.extensions["default-profile-picture"]
|
||||
return ok
|
||||
}
|
||||
|
||||
func (s *AppImagesService) getExtension(name string) (string, error) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
ext, ok := s.extensions[name]
|
||||
if !ok || ext == "" {
|
||||
return "", fmt.Errorf("unknown application image '%s'", name)
|
||||
return "", &common.ImageNotFoundError{}
|
||||
}
|
||||
|
||||
return strings.ToLower(ext), nil
|
||||
|
||||
@@ -2,66 +2,92 @@ package service
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"io/fs"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/pocket-id/pocket-id/backend/internal/common"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/storage"
|
||||
)
|
||||
|
||||
func TestAppImagesService_GetImage(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
originalUploadPath := common.EnvConfig.UploadPath
|
||||
common.EnvConfig.UploadPath = tempDir
|
||||
t.Cleanup(func() {
|
||||
common.EnvConfig.UploadPath = originalUploadPath
|
||||
})
|
||||
|
||||
imagesDir := filepath.Join(tempDir, "application-images")
|
||||
require.NoError(t, os.MkdirAll(imagesDir, 0o755))
|
||||
|
||||
filePath := filepath.Join(imagesDir, "background.webp")
|
||||
require.NoError(t, os.WriteFile(filePath, []byte("data"), fs.FileMode(0o644)))
|
||||
|
||||
service := NewAppImagesService(map[string]string{"background": "webp"})
|
||||
|
||||
path, mimeType, err := service.GetImage("background")
|
||||
store, err := storage.NewFilesystemStorage(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, filePath, path)
|
||||
|
||||
require.NoError(t, store.Save(context.Background(), path.Join("application-images", "background.webp"), bytes.NewReader([]byte("data"))))
|
||||
|
||||
service := NewAppImagesService(map[string]string{"background": "webp"}, store)
|
||||
|
||||
reader, size, mimeType, err := service.GetImage(context.Background(), "background")
|
||||
require.NoError(t, err)
|
||||
defer reader.Close()
|
||||
payload, err := io.ReadAll(reader)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("data"), payload)
|
||||
require.Equal(t, int64(len(payload)), size)
|
||||
require.Equal(t, "image/webp", mimeType)
|
||||
}
|
||||
|
||||
func TestAppImagesService_UpdateImage(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
originalUploadPath := common.EnvConfig.UploadPath
|
||||
common.EnvConfig.UploadPath = tempDir
|
||||
t.Cleanup(func() {
|
||||
common.EnvConfig.UploadPath = originalUploadPath
|
||||
})
|
||||
store, err := storage.NewFilesystemStorage(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
|
||||
imagesDir := filepath.Join(tempDir, "application-images")
|
||||
require.NoError(t, os.MkdirAll(imagesDir, 0o755))
|
||||
require.NoError(t, store.Save(context.Background(), path.Join("application-images", "logoLight.svg"), bytes.NewReader([]byte("old"))))
|
||||
|
||||
oldPath := filepath.Join(imagesDir, "logoLight.svg")
|
||||
require.NoError(t, os.WriteFile(oldPath, []byte("old"), fs.FileMode(0o644)))
|
||||
|
||||
service := NewAppImagesService(map[string]string{"logoLight": "svg"})
|
||||
service := NewAppImagesService(map[string]string{"logoLight": "svg"}, store)
|
||||
|
||||
fileHeader := newFileHeader(t, "logoLight.png", []byte("new"))
|
||||
|
||||
require.NoError(t, service.UpdateImage(fileHeader, "logoLight"))
|
||||
require.NoError(t, service.UpdateImage(context.Background(), fileHeader, "logoLight"))
|
||||
|
||||
_, err := os.Stat(filepath.Join(imagesDir, "logoLight.png"))
|
||||
reader, _, err := store.Open(context.Background(), path.Join("application-images", "logoLight.png"))
|
||||
require.NoError(t, err)
|
||||
_ = reader.Close()
|
||||
|
||||
_, _, err = store.Open(context.Background(), path.Join("application-images", "logoLight.svg"))
|
||||
require.ErrorIs(t, err, fs.ErrNotExist)
|
||||
}
|
||||
|
||||
func TestAppImagesService_ErrorsAndFlags(t *testing.T) {
|
||||
store, err := storage.NewFilesystemStorage(t.TempDir())
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = os.Stat(oldPath)
|
||||
require.ErrorIs(t, err, os.ErrNotExist)
|
||||
service := NewAppImagesService(map[string]string{}, store)
|
||||
|
||||
t.Run("get missing image returns not found", func(t *testing.T) {
|
||||
_, _, _, err := service.GetImage(context.Background(), "missing")
|
||||
require.Error(t, err)
|
||||
var imageErr *common.ImageNotFoundError
|
||||
assert.ErrorAs(t, err, &imageErr)
|
||||
})
|
||||
|
||||
t.Run("reject unsupported file types", func(t *testing.T) {
|
||||
err := service.UpdateImage(context.Background(), newFileHeader(t, "logo.txt", []byte("nope")), "logo")
|
||||
require.Error(t, err)
|
||||
var fileTypeErr *common.FileTypeNotSupportedError
|
||||
assert.ErrorAs(t, err, &fileTypeErr)
|
||||
})
|
||||
|
||||
t.Run("delete and extension tracking", func(t *testing.T) {
|
||||
require.NoError(t, store.Save(context.Background(), path.Join("application-images", "default-profile-picture.png"), bytes.NewReader([]byte("img"))))
|
||||
service.extensions["default-profile-picture"] = "png"
|
||||
|
||||
require.NoError(t, service.DeleteImage(context.Background(), "default-profile-picture"))
|
||||
assert.False(t, service.IsDefaultProfilePictureSet())
|
||||
|
||||
err := service.DeleteImage(context.Background(), "default-profile-picture")
|
||||
require.Error(t, err)
|
||||
var imageErr *common.ImageNotFoundError
|
||||
assert.ErrorAs(t, err, &imageErr)
|
||||
})
|
||||
}
|
||||
|
||||
func newFileHeader(t *testing.T, filename string, content []byte) *multipart.FileHeader {
|
||||
|
||||
296 backend/internal/service/app_lock_service.go Normal file
@@ -0,0 +1,296 @@
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/model"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrLockUnavailable = errors.New("lock is already held by another process")
|
||||
ErrLockLost = errors.New("lock ownership lost")
|
||||
)
|
||||
|
||||
const (
|
||||
ttl = 30 * time.Second
|
||||
renewInterval = 20 * time.Second
|
||||
renewRetries = 3
|
||||
lockKey = "application_lock"
|
||||
)
|
||||
|
||||
type AppLockService struct {
|
||||
db *gorm.DB
|
||||
lockID string
|
||||
processID int64
|
||||
hostID string
|
||||
}
|
||||
|
||||
func NewAppLockService(db *gorm.DB) *AppLockService {
|
||||
host, err := os.Hostname()
|
||||
if err != nil || host == "" {
|
||||
host = "unknown-host"
|
||||
}
|
||||
|
||||
return &AppLockService{
|
||||
db: db,
|
||||
processID: int64(os.Getpid()),
|
||||
hostID: host,
|
||||
lockID: uuid.NewString(),
|
||||
}
|
||||
}
|
||||
|
||||
type lockValue struct {
|
||||
ProcessID int64 `json:"process_id"`
|
||||
HostID string `json:"host_id"`
|
||||
LockID string `json:"lock_id"`
|
||||
ExpiresAt int64 `json:"expires_at"`
|
||||
}
|
||||
|
||||
func (lv *lockValue) Marshal() (string, error) {
|
||||
data, err := json.Marshal(lv)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
||||
|
||||
func (lv *lockValue) Unmarshal(raw string) error {
|
||||
if raw == "" {
|
||||
return nil
|
||||
}
|
||||
return json.Unmarshal([]byte(raw), lv)
|
||||
}
|
||||
|
||||
// Acquire obtains the lock. When force is true, the lock is stolen from any existing owner.
|
||||
// If the lock is forcefully acquired, it blocks until the previous lock has expired.
|
||||
func (s *AppLockService) Acquire(ctx context.Context, force bool) (waitUntil time.Time, err error) {
|
||||
tx := s.db.Begin()
|
||||
defer func() {
|
||||
tx.Rollback()
|
||||
}()
|
||||
|
||||
var prevLockRaw string
|
||||
err = tx.
|
||||
WithContext(ctx).
|
||||
Model(&model.KV{}).
|
||||
Where("key = ?", lockKey).
|
||||
Clauses(clause.Locking{Strength: "UPDATE"}).
|
||||
Select("value").
|
||||
		Scan(&prevLockRaw).
		Error
	if err != nil {
		return time.Time{}, fmt.Errorf("query existing lock: %w", err)
	}

	var prevLock lockValue
	if prevLockRaw != "" {
		if err := prevLock.Unmarshal(prevLockRaw); err != nil {
			return time.Time{}, fmt.Errorf("decode existing lock value: %w", err)
		}
	}

	now := time.Now()
	nowUnix := now.Unix()

	value := lockValue{
		ProcessID: s.processID,
		HostID:    s.hostID,
		LockID:    s.lockID,
		ExpiresAt: now.Add(ttl).Unix(),
	}
	raw, err := value.Marshal()
	if err != nil {
		return time.Time{}, fmt.Errorf("encode lock value: %w", err)
	}

	var query string
	switch s.db.Name() {
	case "sqlite":
		query = `
			INSERT INTO kv (key, value)
			VALUES (?, ?)
			ON CONFLICT(key) DO UPDATE SET
				value = excluded.value
			WHERE (json_extract(kv.value, '$.expires_at') < ?) OR ?
		`
	case "postgres":
		query = `
			INSERT INTO kv (key, value)
			VALUES ($1, $2)
			ON CONFLICT(key) DO UPDATE SET
				value = excluded.value
			WHERE ((kv.value::json->>'expires_at')::bigint < $3) OR ($4::boolean IS TRUE)
		`
	default:
		return time.Time{}, fmt.Errorf("unsupported database dialect: %s", s.db.Name())
	}

	res := tx.WithContext(ctx).Exec(query, lockKey, raw, nowUnix, force)
	if res.Error != nil {
		return time.Time{}, fmt.Errorf("lock acquisition failed: %w", res.Error)
	}

	if err := tx.Commit().Error; err != nil {
		return time.Time{}, fmt.Errorf("commit lock acquisition: %w", err)
	}

	// If there is a lock that is not expired and force is false, no rows will be affected
	if res.RowsAffected == 0 {
		return time.Time{}, ErrLockUnavailable
	}

	if force && prevLock.ExpiresAt > nowUnix && prevLock.LockID != s.lockID {
		waitUntil = time.Unix(prevLock.ExpiresAt, 0)
	}

	attrs := []any{
		slog.Int64("process_id", s.processID),
		slog.String("host_id", s.hostID),
	}
	if wait := time.Until(waitUntil); wait > 0 {
		attrs = append(attrs, slog.Duration("wait_before_proceeding", wait))
	}
	slog.Info("Acquired application lock", attrs...)

	return waitUntil, nil
}

// RunRenewal keeps renewing the lock until the context is canceled.
func (s *AppLockService) RunRenewal(ctx context.Context) error {
	ticker := time.NewTicker(renewInterval)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return nil
		case <-ticker.C:
			if err := s.renew(ctx); err != nil {
				return fmt.Errorf("renew lock: %w", err)
			}
		}
	}
}

// Release releases the lock if it is held by this process.
func (s *AppLockService) Release(ctx context.Context) error {
	opCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
	defer cancel()

	var query string
	switch s.db.Name() {
	case "sqlite":
		query = `
			DELETE FROM kv
			WHERE key = ?
				AND json_extract(value, '$.lock_id') = ?
		`
	case "postgres":
		query = `
			DELETE FROM kv
			WHERE key = $1
				AND value::json->>'lock_id' = $2
		`
	default:
		return fmt.Errorf("unsupported database dialect: %s", s.db.Name())
	}

	res := s.db.WithContext(opCtx).Exec(query, lockKey, s.lockID)
	if res.Error != nil {
		return fmt.Errorf("release lock failed: %w", res.Error)
	}

	if res.RowsAffected == 0 {
		slog.Warn("Application lock not held by this process, cannot release",
			slog.Int64("process_id", s.processID),
			slog.String("host_id", s.hostID),
		)
	}

	slog.Info("Released application lock",
		slog.Int64("process_id", s.processID),
		slog.String("host_id", s.hostID),
	)
	return nil
}

// renew tries to renew the lock, retrying up to renewRetries times (sleeping 1s between attempts).
func (s *AppLockService) renew(ctx context.Context) error {
	var lastErr error
	for attempt := 1; attempt <= renewRetries; attempt++ {
		now := time.Now()
		nowUnix := now.Unix()
		expiresAt := now.Add(ttl).Unix()

		value := lockValue{
			LockID:    s.lockID,
			ProcessID: s.processID,
			HostID:    s.hostID,
			ExpiresAt: expiresAt,
		}
		raw, err := value.Marshal()
		if err != nil {
			return fmt.Errorf("encode lock value: %w", err)
		}

		var query string
		switch s.db.Name() {
		case "sqlite":
			query = `
				UPDATE kv
				SET value = ?
				WHERE key = ?
					AND json_extract(value, '$.lock_id') = ?
					AND json_extract(value, '$.expires_at') > ?
			`
		case "postgres":
			query = `
				UPDATE kv
				SET value = $1
				WHERE key = $2
					AND value::json->>'lock_id' = $3
					AND ((value::json->>'expires_at')::bigint > $4)
			`
		default:
			return fmt.Errorf("unsupported database dialect: %s", s.db.Name())
		}

		opCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
		res := s.db.WithContext(opCtx).Exec(query, raw, lockKey, s.lockID, nowUnix)
		cancel()

		switch {
		case res.Error != nil:
			lastErr = fmt.Errorf("lock renewal failed: %w", res.Error)
		case res.RowsAffected == 0:
			// Must be after checking res.Error
			return ErrLockLost
		default:
			slog.Debug("Renewed application lock",
				slog.Int64("process_id", s.processID),
				slog.String("host_id", s.hostID),
			)
			return nil
		}

		// Wait before next attempt or cancel if context is done
		if attempt < renewRetries {
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(1 * time.Second):
			}
		}
	}

	return lastErr
}
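For reference, the kv row that Acquire and renew manipulate is a single JSON document stored under lockKey. The lockValue type itself is defined earlier in app_lock_service.go and is not part of this excerpt, so the field names and json tags in this minimal sketch are inferred from the '$.lock_id', '$.expires_at', '$.process_id' and '$.host_id' paths used by the queries above; treat them as an assumption, not the actual definition.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// lockDocSketch mirrors the JSON document that the upsert and renewal queries
// inspect via json_extract / ->>'…'. Tags are inferred from the SQL above.
type lockDocSketch struct {
	LockID    string `json:"lock_id"`
	ProcessID int64  `json:"process_id"`
	HostID    string `json:"host_id"`
	ExpiresAt int64  `json:"expires_at"`
}

func main() {
	doc := lockDocSketch{
		LockID:    "a13c7673-c7ae-49f1-9112-2cd2d0d4b0c1",
		ProcessID: 1,
		HostID:    "host-a",
		ExpiresAt: time.Now().Add(30 * time.Second).Unix(),
	}
	b, _ := json.Marshal(doc)
	// Prints something like:
	// {"lock_id":"a13c7673-…","process_id":1,"host_id":"host-a","expires_at":1739980800}
	fmt.Println(string(b))
}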
backend/internal/service/app_lock_service_test.go (new file, 189 lines)
@@ -0,0 +1,189 @@
package service

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"gorm.io/gorm"

	"github.com/pocket-id/pocket-id/backend/internal/model"
	testutils "github.com/pocket-id/pocket-id/backend/internal/utils/testing"
)

func newTestAppLockService(t *testing.T, db *gorm.DB) *AppLockService {
	t.Helper()

	return &AppLockService{
		db:        db,
		processID: 1,
		hostID:    "test-host",
		lockID:    "a13c7673-c7ae-49f1-9112-2cd2d0d4b0c1",
	}
}

func insertLock(t *testing.T, db *gorm.DB, value lockValue) {
	t.Helper()

	raw, err := value.Marshal()
	require.NoError(t, err)

	err = db.Create(&model.KV{Key: lockKey, Value: &raw}).Error
	require.NoError(t, err)
}

func readLockValue(t *testing.T, db *gorm.DB) lockValue {
	t.Helper()

	var row model.KV
	err := db.Take(&row, "key = ?", lockKey).Error
	require.NoError(t, err)

	require.NotNil(t, row.Value)

	var value lockValue
	err = value.Unmarshal(*row.Value)
	require.NoError(t, err)

	return value
}

func TestAppLockServiceAcquire(t *testing.T) {
	t.Run("creates new lock when none exists", func(t *testing.T) {
		db := testutils.NewDatabaseForTest(t)
		service := newTestAppLockService(t, db)

		_, err := service.Acquire(context.Background(), false)
		require.NoError(t, err)

		stored := readLockValue(t, db)
		require.Equal(t, service.processID, stored.ProcessID)
		require.Equal(t, service.hostID, stored.HostID)
		require.Greater(t, stored.ExpiresAt, time.Now().Unix())
	})

	t.Run("returns ErrLockUnavailable when lock held by another process", func(t *testing.T) {
		db := testutils.NewDatabaseForTest(t)
		service := newTestAppLockService(t, db)

		existing := lockValue{
			ProcessID: 99,
			HostID:    "other-host",
			ExpiresAt: time.Now().Add(ttl).Unix(),
		}
		insertLock(t, db, existing)

		_, err := service.Acquire(context.Background(), false)
		require.ErrorIs(t, err, ErrLockUnavailable)

		current := readLockValue(t, db)
		require.Equal(t, existing, current)
	})

	t.Run("force acquisition steals lock", func(t *testing.T) {
		db := testutils.NewDatabaseForTest(t)
		service := newTestAppLockService(t, db)

		insertLock(t, db, lockValue{
			ProcessID: 99,
			HostID:    "other-host",
			ExpiresAt: time.Now().Unix(),
		})

		_, err := service.Acquire(context.Background(), true)
		require.NoError(t, err)

		stored := readLockValue(t, db)
		require.Equal(t, service.processID, stored.ProcessID)
		require.Equal(t, service.hostID, stored.HostID)
		require.Greater(t, stored.ExpiresAt, time.Now().Unix())
	})
}

func TestAppLockServiceRelease(t *testing.T) {
	t.Run("removes owned lock", func(t *testing.T) {
		db := testutils.NewDatabaseForTest(t)
		service := newTestAppLockService(t, db)

		_, err := service.Acquire(context.Background(), false)
		require.NoError(t, err)

		err = service.Release(context.Background())
		require.NoError(t, err)

		var row model.KV
		err = db.Take(&row, "key = ?", lockKey).Error
		require.ErrorIs(t, err, gorm.ErrRecordNotFound)
	})

	t.Run("ignores lock held by another owner", func(t *testing.T) {
		db := testutils.NewDatabaseForTest(t)
		service := newTestAppLockService(t, db)

		existing := lockValue{
			ProcessID: 2,
			HostID:    "other-host",
			ExpiresAt: time.Now().Add(ttl).Unix(),
		}
		insertLock(t, db, existing)

		err := service.Release(context.Background())
		require.NoError(t, err)

		stored := readLockValue(t, db)
		require.Equal(t, existing, stored)
	})
}

func TestAppLockServiceRenew(t *testing.T) {
	t.Run("extends expiration when lock is still owned", func(t *testing.T) {
		db := testutils.NewDatabaseForTest(t)
		service := newTestAppLockService(t, db)

		_, err := service.Acquire(context.Background(), false)
		require.NoError(t, err)

		before := readLockValue(t, db)

		err = service.renew(context.Background())
		require.NoError(t, err)

		after := readLockValue(t, db)
		require.Equal(t, service.processID, after.ProcessID)
		require.Equal(t, service.hostID, after.HostID)
		require.GreaterOrEqual(t, after.ExpiresAt, before.ExpiresAt)
	})

	t.Run("returns ErrLockLost when lock is missing", func(t *testing.T) {
		db := testutils.NewDatabaseForTest(t)
		service := newTestAppLockService(t, db)

		err := service.renew(context.Background())
		require.ErrorIs(t, err, ErrLockLost)
	})

	t.Run("returns ErrLockLost when ownership changed", func(t *testing.T) {
		db := testutils.NewDatabaseForTest(t)
		service := newTestAppLockService(t, db)

		_, err := service.Acquire(context.Background(), false)
		require.NoError(t, err)

		// Simulate a different process taking the lock.
		newOwner := lockValue{
			ProcessID: 9,
			HostID:    "stolen-host",
			ExpiresAt: time.Now().Add(ttl).Unix(),
		}
		raw, marshalErr := newOwner.Marshal()
		require.NoError(t, marshalErr)
		updateErr := db.Model(&model.KV{}).
			Where("key = ?", lockKey).
			Update("value", raw).Error
		require.NoError(t, updateErr)

		err = service.renew(context.Background())
		require.ErrorIs(t, err, ErrLockLost)
	})
}
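A sketch of how a caller might combine Acquire, RunRenewal and Release at startup. Only the methods shown above are assumed; the actual bootstrap wiring in Pocket ID is outside this diff, and ErrLockUnavailable is replaced by a local stand-in so the sketch is self-contained.

package sketch

import (
	"context"
	"errors"
	"log"
	"time"
)

// appLock is a hypothetical interface matching the AppLockService methods above.
type appLock interface {
	Acquire(ctx context.Context, force bool) (time.Time, error)
	RunRenewal(ctx context.Context) error
	Release(ctx context.Context) error
}

// errLockUnavailable stands in for service.ErrLockUnavailable in this sketch.
var errLockUnavailable = errors.New("lock unavailable")

func runWithLock(ctx context.Context, lock appLock, force bool) error {
	waitUntil, err := lock.Acquire(ctx, force)
	if errors.Is(err, errLockUnavailable) {
		return errors.New("another instance holds the application lock")
	} else if err != nil {
		return err
	}
	defer lock.Release(context.Background())

	// After a forced steal, wait out the previous holder's TTL before touching shared state.
	if wait := time.Until(waitUntil); wait > 0 {
		log.Printf("waiting %s for the previous lock holder to expire", wait)
		time.Sleep(wait)
	}

	// Keep the lock renewed in the background; RunRenewal returns when ctx is
	// canceled or the lock is lost.
	go func() {
		if err := lock.RunRenewal(ctx); err != nil {
			log.Printf("application lock renewal stopped: %v", err)
		}
	}()

	// ... run work that must not execute concurrently with other instances ...
	return nil
}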
@@ -6,7 +6,6 @@ import (
|
||||
"log/slog"
|
||||
|
||||
userAgentParser "github.com/mileusna/useragent"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/dto"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/model"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/utils"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/utils/email"
|
||||
@@ -35,7 +34,7 @@ func (s *AuditLogService) Create(ctx context.Context, event model.AuditLogEvent,
|
||||
country, city, err := s.geoliteService.GetLocationByIP(ipAddress)
|
||||
if err != nil {
|
||||
// Log the error but don't interrupt the operation
|
||||
slog.Warn("Failed to get IP location", "error", err)
|
||||
slog.Warn("Failed to get IP location", slog.String("ip", ipAddress), slog.Any("error", err))
|
||||
}
|
||||
|
||||
auditLog := model.AuditLog{
|
||||
@@ -136,14 +135,14 @@ func (s *AuditLogService) CreateNewSignInWithEmail(ctx context.Context, ipAddres
|
||||
}
|
||||
|
||||
// ListAuditLogsForUser retrieves all audit logs for a given user ID
|
||||
func (s *AuditLogService) ListAuditLogsForUser(ctx context.Context, userID string, sortedPaginationRequest utils.SortedPaginationRequest) ([]model.AuditLog, utils.PaginationResponse, error) {
|
||||
func (s *AuditLogService) ListAuditLogsForUser(ctx context.Context, userID string, listRequestOptions utils.ListRequestOptions) ([]model.AuditLog, utils.PaginationResponse, error) {
|
||||
var logs []model.AuditLog
|
||||
query := s.db.
|
||||
WithContext(ctx).
|
||||
Model(&model.AuditLog{}).
|
||||
Where("user_id = ?", userID)
|
||||
|
||||
pagination, err := utils.PaginateAndSort(sortedPaginationRequest, query, &logs)
|
||||
pagination, err := utils.PaginateFilterAndSort(listRequestOptions, query, &logs)
|
||||
return logs, pagination, err
|
||||
}
|
||||
|
||||
@@ -152,7 +151,7 @@ func (s *AuditLogService) DeviceStringFromUserAgent(userAgent string) string {
|
||||
return ua.Name + " on " + ua.OS + " " + ua.OSVersion
|
||||
}
|
||||
|
||||
func (s *AuditLogService) ListAllAuditLogs(ctx context.Context, sortedPaginationRequest utils.SortedPaginationRequest, filters dto.AuditLogFilterDto) ([]model.AuditLog, utils.PaginationResponse, error) {
|
||||
func (s *AuditLogService) ListAllAuditLogs(ctx context.Context, listRequestOptions utils.ListRequestOptions) ([]model.AuditLog, utils.PaginationResponse, error) {
|
||||
var logs []model.AuditLog
|
||||
|
||||
query := s.db.
|
||||
@@ -160,33 +159,36 @@ func (s *AuditLogService) ListAllAuditLogs(ctx context.Context, sortedPagination
|
||||
Preload("User").
|
||||
Model(&model.AuditLog{})
|
||||
|
||||
if filters.UserID != "" {
|
||||
query = query.Where("user_id = ?", filters.UserID)
|
||||
}
|
||||
if filters.Event != "" {
|
||||
query = query.Where("event = ?", filters.Event)
|
||||
}
|
||||
if filters.ClientName != "" {
|
||||
if clientName, ok := listRequestOptions.Filters["clientName"]; ok {
|
||||
dialect := s.db.Name()
|
||||
switch dialect {
|
||||
case "sqlite":
|
||||
query = query.Where("json_extract(data, '$.clientName') = ?", filters.ClientName)
|
||||
query = query.Where("json_extract(data, '$.clientName') IN ?", clientName)
|
||||
case "postgres":
|
||||
query = query.Where("data->>'clientName' = ?", filters.ClientName)
|
||||
query = query.Where("data->>'clientName' IN ?", clientName)
|
||||
default:
|
||||
return nil, utils.PaginationResponse{}, fmt.Errorf("unsupported database dialect: %s", dialect)
|
||||
}
|
||||
}
|
||||
if filters.Location != "" {
|
||||
switch filters.Location {
|
||||
case "external":
|
||||
query = query.Where("country != 'Internal Network'")
|
||||
case "internal":
|
||||
query = query.Where("country = 'Internal Network'")
|
||||
|
||||
if locations, ok := listRequestOptions.Filters["location"]; ok {
|
||||
mapped := make([]string, 0, len(locations))
|
||||
for _, v := range locations {
|
||||
if s, ok := v.(string); ok {
|
||||
switch s {
|
||||
case "internal":
|
||||
mapped = append(mapped, "Internal Network")
|
||||
case "external":
|
||||
mapped = append(mapped, "External Network")
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(mapped) > 0 {
|
||||
query = query.Where("country IN ?", mapped)
|
||||
}
|
||||
}
|
||||
|
||||
pagination, err := utils.PaginateAndSort(sortedPaginationRequest, query, &logs)
|
||||
pagination, err := utils.PaginateFilterAndSort(listRequestOptions, query, &logs)
|
||||
if err != nil {
|
||||
return nil, pagination, err
|
||||
}
|
||||
@@ -199,8 +201,8 @@ func (s *AuditLogService) ListUsernamesWithIds(ctx context.Context) (users map[s
|
||||
WithContext(ctx).
|
||||
Joins("User").
|
||||
Model(&model.AuditLog{}).
|
||||
Select("DISTINCT \"User\".id, \"User\".username").
|
||||
Where("\"User\".username IS NOT NULL")
|
||||
Select(`DISTINCT "User".id, "User".username`).
|
||||
Where(`"User".username IS NOT NULL`)
|
||||
|
||||
type Result struct {
|
||||
ID string `gorm:"column:id"`
|
||||
@@ -208,7 +210,8 @@ func (s *AuditLogService) ListUsernamesWithIds(ctx context.Context) (users map[s
|
||||
}
|
||||
|
||||
var results []Result
|
||||
if err := query.Find(&results).Error; err != nil {
|
||||
err = query.Find(&results).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query user IDs: %w", err)
|
||||
}
|
||||
|
||||
@@ -244,7 +247,8 @@ func (s *AuditLogService) ListClientNames(ctx context.Context) (clientNames []st
|
||||
}
|
||||
|
||||
var results []Result
|
||||
if err := query.Find(&results).Error; err != nil {
|
||||
err = query.Find(&results).Error
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to query client IDs: %w", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -7,15 +7,12 @@ import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/fxamacker/cbor/v2"
|
||||
"github.com/go-webauthn/webauthn/protocol"
|
||||
"github.com/lestrrat-go/jwx/v3/jwa"
|
||||
"github.com/lestrrat-go/jwx/v3/jwk"
|
||||
@@ -25,6 +22,7 @@ import (
|
||||
"github.com/pocket-id/pocket-id/backend/internal/common"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/model"
|
||||
datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/storage"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/utils"
|
||||
jwkutils "github.com/pocket-id/pocket-id/backend/internal/utils/jwk"
|
||||
"github.com/pocket-id/pocket-id/backend/resources"
|
||||
@@ -35,15 +33,19 @@ type TestService struct {
|
||||
jwtService *JwtService
|
||||
appConfigService *AppConfigService
|
||||
ldapService *LdapService
|
||||
fileStorage storage.FileStorage
|
||||
appLockService *AppLockService
|
||||
externalIdPKey jwk.Key
|
||||
}
|
||||
|
||||
func NewTestService(db *gorm.DB, appConfigService *AppConfigService, jwtService *JwtService, ldapService *LdapService) (*TestService, error) {
|
||||
func NewTestService(db *gorm.DB, appConfigService *AppConfigService, jwtService *JwtService, ldapService *LdapService, appLockService *AppLockService, fileStorage storage.FileStorage) (*TestService, error) {
|
||||
s := &TestService{
|
||||
db: db,
|
||||
appConfigService: appConfigService,
|
||||
jwtService: jwtService,
|
||||
ldapService: ldapService,
|
||||
appLockService: appLockService,
|
||||
fileStorage: fileStorage,
|
||||
}
|
||||
err := s.initExternalIdP()
|
||||
if err != nil {
|
||||
@@ -96,6 +98,17 @@ func (s *TestService) SeedDatabase(baseURL string) error {
|
||||
DisplayName: "Craig Federighi",
|
||||
IsAdmin: false,
|
||||
},
|
||||
{
|
||||
Base: model.Base{
|
||||
ID: "d9256384-98ad-49a7-bc58-99ad0b4dc23c",
|
||||
},
|
||||
Username: "eddy",
|
||||
Email: utils.Ptr("eddy.cue@test.com"),
|
||||
FirstName: "Eddy",
|
||||
LastName: "Cue",
|
||||
DisplayName: "Eddy Cue",
|
||||
IsAdmin: false,
|
||||
},
|
||||
}
|
||||
for _, user := range users {
|
||||
if err := tx.Create(&user).Error; err != nil {
|
||||
@@ -167,10 +180,11 @@ func (s *TestService) SeedDatabase(baseURL string) error {
|
||||
Base: model.Base{
|
||||
ID: "606c7782-f2b1-49e5-8ea9-26eb1b06d018",
|
||||
},
|
||||
Name: "Immich",
|
||||
Secret: "$2a$10$Ak.FP8riD1ssy2AGGbG.gOpnp/rBpymd74j0nxNMtW0GG1Lb4gzxe", // PYjrE9u4v9GVqXKi52eur0eb2Ci4kc0x
|
||||
CallbackURLs: model.UrlList{"http://immich/auth/callback"},
|
||||
CreatedByID: utils.Ptr(users[1].ID),
|
||||
Name: "Immich",
|
||||
Secret: "$2a$10$Ak.FP8riD1ssy2AGGbG.gOpnp/rBpymd74j0nxNMtW0GG1Lb4gzxe", // PYjrE9u4v9GVqXKi52eur0eb2Ci4kc0x
|
||||
CallbackURLs: model.UrlList{"http://immich/auth/callback"},
|
||||
CreatedByID: utils.Ptr(users[1].ID),
|
||||
IsGroupRestricted: true,
|
||||
AllowedUserGroups: []model.UserGroup{
|
||||
userGroups[1],
|
||||
},
|
||||
@@ -183,6 +197,7 @@ func (s *TestService) SeedDatabase(baseURL string) error {
|
||||
Secret: "$2a$10$xcRReBsvkI1XI6FG8xu/pOgzeF00bH5Wy4d/NThwcdi3ZBpVq/B9a", // n4VfQeXlTzA6yKpWbR9uJcMdSx2qH0Lo
|
||||
CallbackURLs: model.UrlList{"http://tailscale/auth/callback"},
|
||||
LogoutCallbackURLs: model.UrlList{"http://tailscale/auth/logout/callback"},
|
||||
IsGroupRestricted: true,
|
||||
CreatedByID: utils.Ptr(users[0].ID),
|
||||
},
|
||||
{
|
||||
@@ -205,6 +220,20 @@ func (s *TestService) SeedDatabase(baseURL string) error {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Base: model.Base{
|
||||
ID: "c46d2090-37a0-4f2b-8748-6aa53b0c1afa",
|
||||
},
|
||||
Name: "SCIM Client",
|
||||
Secret: "$2a$10$h4wfa8gI7zavDAxwzSq1sOwYU4e8DwK1XZ8ZweNnY5KzlJ3Iz.qdK", // nQbiuMRG7FpdK2EnDd5MBivWQeKFXohn
|
||||
CallbackURLs: model.UrlList{"http://scimclient/auth/callback"},
|
||||
CreatedByID: utils.Ptr(users[0].ID),
|
||||
IsGroupRestricted: true,
|
||||
AllowedUserGroups: []model.UserGroup{
|
||||
userGroups[0],
|
||||
userGroups[1],
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, client := range oidcClients {
|
||||
if err := tx.Create(&client).Error; err != nil {
|
||||
@@ -286,8 +315,8 @@ func (s *TestService) SeedDatabase(baseURL string) error {
|
||||
// openssl genpkey -algorithm EC -pkeyopt ec_paramgen_curve:P-256 | \
|
||||
// openssl pkcs8 -topk8 -nocrypt | tee >(openssl pkey -pubout)
|
||||
|
||||
publicKeyPasskey1, _ := s.getCborPublicKey("MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEwcOo5KV169KR67QEHrcYkeXE3CCxv2BgwnSq4VYTQxyLtdmKxegexa8JdwFKhKXa2BMI9xaN15BoL6wSCRFJhg==")
|
||||
publicKeyPasskey2, _ := s.getCborPublicKey("MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEj4qA0PrZzg8Co1C27nyUbzrp8Ewjr7eOlGI2LfrzmbL5nPhZRAdJ3hEaqrHMSnJBhfMqtQGKwDYpaLIQFAKLhw==")
|
||||
publicKeyPasskey1, _ := base64.StdEncoding.DecodeString("pQMmIAEhWCDBw6jkpXXr0pHrtAQetxiR5cTcILG/YGDCdKrhVhNDHCJYIIu12YrF6B7Frwl3AUqEpdrYEwj3Fo3XkGgvrBIJEUmGAQI=")
|
||||
publicKeyPasskey2, _ := base64.StdEncoding.DecodeString("pSJYIPmc+FlEB0neERqqscxKckGF8yq1AYrANiloshAUAouHAQIDJiABIVggj4qA0PrZzg8Co1C27nyUbzrp8Ewjr7eOlGI2LfrzmbI=")
|
||||
webauthnCredentials := []model.WebauthnCredential{
|
||||
{
|
||||
Name: "Passkey 1",
|
||||
@@ -316,6 +345,10 @@ func (s *TestService) SeedDatabase(baseURL string) error {
|
||||
Challenge: "challenge",
|
||||
ExpiresAt: datatype.DateTime(time.Now().Add(1 * time.Hour)),
|
||||
UserVerification: "preferred",
|
||||
CredentialParams: model.CredentialParameters{
|
||||
{Type: "public-key", Algorithm: -7},
|
||||
{Type: "public-key", Algorithm: -257},
|
||||
},
|
||||
}
|
||||
if err := tx.Create(&webauthnSession).Error; err != nil {
|
||||
return err
|
||||
@@ -325,9 +358,10 @@ func (s *TestService) SeedDatabase(baseURL string) error {
|
||||
Base: model.Base{
|
||||
ID: "5f1fa856-c164-4295-961e-175a0d22d725",
|
||||
},
|
||||
Name: "Test API Key",
|
||||
Key: "6c34966f57ef2bb7857649aff0e7ab3ad67af93c846342ced3f5a07be8706c20",
|
||||
UserID: users[0].ID,
|
||||
Name: "Test API Key",
|
||||
Key: "6c34966f57ef2bb7857649aff0e7ab3ad67af93c846342ced3f5a07be8706c20",
|
||||
UserID: users[0].ID,
|
||||
ExpiresAt: datatype.DateTime(time.Now().Add(30 * 24 * time.Hour)),
|
||||
}
|
||||
if err := tx.Create(&apiKey).Error; err != nil {
|
||||
return err
|
||||
@@ -342,6 +376,9 @@ func (s *TestService) SeedDatabase(baseURL string) error {
|
||||
ExpiresAt: datatype.DateTime(time.Now().Add(24 * time.Hour)),
|
||||
UsageLimit: 1,
|
||||
UsageCount: 0,
|
||||
UserGroups: []model.UserGroup{
|
||||
userGroups[0],
|
||||
},
|
||||
},
|
||||
{
|
||||
Base: model.Base{
|
||||
@@ -377,6 +414,20 @@ func (s *TestService) SeedDatabase(baseURL string) error {
|
||||
}
|
||||
}
|
||||
|
||||
keyValues := []model.KV{
|
||||
{
|
||||
Key: jwkutils.PrivateKeyDBKey,
|
||||
// {"alg":"RS256","d":"mvMDWSdPPvcum0c0iEHE2gbqtV2NKMmLwrl9E6K7g8lTV95SePLnW_bwyMPV7EGp7PQk3l17I5XRhFjze7GqTnFIOgKzMianPs7jv2ELtBMGK0xOPATgu1iGb70xZ6vcvuEfRyY3dJ0zr4jpUdVuXwKmx9rK4IdZn2dFCKfvSuspqIpz11RhF1ALrqDLkxGVv7ZwNh0_VhJZU9hcjG5l6xc7rQEKpPRkZp0IdjkGS8Z0FskoVaiRIWAbZuiVFB9WCW8k1czC4HQTPLpII01bUQx2ludbm0UlXRgVU9ptUUbU7GAImQqTOW8LfPGklEvcgzlIlR_oqw4P9yBxLi-yMQ","dp":"pvNCSnnhbo8Igw9psPR-DicxFnkXlu_ix4gpy6efTrxA-z1VDFDioJ814vKQNioYDzpyAP1gfMPhRkvG_q0hRZsJah3Sb9dfA-WkhSWY7lURQP4yIBTMU0PF_rEATuS7lRciYk1SOx5fqXZd3m_LP0vpBC4Ujlq6NAq6CIjCnms","dq":"TtUVGCCkPNgfOLmkYXu7dxxUCV5kB01-xAEK2OY0n0pG8vfDophH4_D_ZC7nvJ8J9uDhs_3JStexq1lIvaWtG99RNTChIEDzpdn6GH9yaVcb_eB4uJjrNm64FhF8PGCCwxA-xMCZMaARKwhMB2_IOMkxUbWboL3gnhJ2rDO_QO0","e":"AQAB","kid":"8uHDw3M6rf8","kty":"RSA","n":"yaeEL0VKoPBXIAaWXsUgmu05lAvEIIdJn0FX9lHh4JE5UY9B83C5sCNdhs9iSWzpeP11EVjWp8i3Yv2CF7c7u50BXnVBGtxpZpFC-585UXacoJ0chUmarL9GRFJcM1nPHBTFu68aRrn1rIKNHUkNaaxFo0NFGl_4EDDTO8HwawTjwkPoQlRzeByhlvGPVvwgB3Fn93B8QJ_cZhXKxJvjjrC_8Pk76heC_ntEMru71Ix77BoC3j2TuyiN7m9RNBW8BU5q6lKoIdvIeZfTFLzi37iufyfvMrJTixp9zhNB1NxlLCeOZl2MXegtiGqd2H3cbAyqoOiv9ihUWTfXj7SxJw","p":"_Yylc9e07CKdqNRD2EosMC2mrhrEa9j5oY_l00Qyy4-jmCA59Q9viyqvveRo0U7cRvFA5BWgWN6GGLh1DG3X-QBqVr0dnk3uzbobb55RYUXyPLuBZI2q6w2oasbiDwPdY7KpkVv_H-bpITQlyDvO8hhucA6rUV7F6KTQVz8M3Ms","q":"y5p3hch-7jJ21TkAhp_Vk1fLCAuD4tbErwQs2of9ja8sB4iJOs5Wn6HD3P7Mc8Plye7qaLHvzc8I5g0tPKWvC0DPd_FLPXiWwMVAzee3NUX_oGeJNOQp11y1w_KqdO9qZqHSEPZ3NcFL_SZMFgggxhM1uzRiPzsVN0lnD_6prZU","qi":"2Grt6uXHm61ji3xSdkBWNtUnj19vS1-7rFJp5SoYztVQVThf_W52BAiXKBdYZDRVoItC_VS2NvAOjeJjhYO_xQ_q3hK7MdtuXfEPpLnyXKkmWo3lrJ26wbeF6l05LexCkI7ShsOuSt-dsyaTJTszuKDIA6YOfWvfo3aVZmlWRaI","use":"sig"}
|
||||
Value: utils.Ptr("7d/5hl7diJ2rnFL14hEAQf9tzpu29aqXQ8jpJ2iqqKUNFZpdOkEpud0CmRv4H3r8yyk2u/Gqqj9klSy58DJkYXGF5PAYgLyoBIb7L3JXWRbxg4cQ3QJCug13l2OTmpAKoVc+rmX8c3j3h1sNqyJ+7Ql5sS0jSeyiYgIsFNCdnK5alBDyvtcpe/QDpklmP4JCeVpvmf2rLGplk3g5UO5ydJ8UiDXxfDmi+gF6NKJvrGnnah8Ar3G/x88z+tTJtp0DIQFwxXwUM2XZqzEVGm8K2r0w5o9/Keh6bBBaiuH2C78ZOaijGV3DovhR+e9J0cYUYGwT42MZMx9fSWQ/lvWGGnf+Uq3MXJfjWSREfhkp8KTQwR9F7+dnVJWswOEk7jPR8I7hCWTMxJyvaFX3wgAXIVmhrgXZQQbYOqTt56IoqUl0xOJku8dA8opg2UcLlmmuOh6+hfkXKsiiS/H/9c1BVIGj1fCOiT6IePh4wKKSTbwJnPD5EKmdJpgTsUpjcDnXQKY4ReO0UpdRdKxwRDDLeQuG6j+ljGxR9GPudCU9Nmci6rFVI6n5LWYkQxBA1O73RpmXRZPDzntDfpXMEonkmSvOoxaCK2Id7CRKMdqvR0kEouwnhk5WSFtsfi3sA0pkXzPFxwZeWM8vFtbffZOZzXaOhxCOfcj1NClZohlZhyc4jvkxmrpY7PSaAzih0AmHI7y0LYFi6fZu/K4EheVa1+KF55nWZ8ARikHMWKAKkyExkTak7xyN884TDmzURRaPlQg4jzQte5WMNjAG/hlHibdMBNvgwiYd49ZxteJ8ABdbiXVRl+2JGbdjl2ubpQZwOn7bJKlqO56bIwsZ+e4+pXsuOGdBahkHrUjtMEmH3DZbGc6CJLbcmdhdpApLQRRcLAazxJhzAwJ47FRYsHsj57LnYNvmcKdIxw8rxCdLUuzz95uw0T3ankEO5J9sjem+HMEuKdwXK1UcuOn2rjR8Sd/BuvQmeso27dFbPXqXYNS90Ml45YyTvcKSiopD181oZR703TFUSpR7dsiqROMr+p/2jN9h6a8WbQ8xpksyclaQByY/M77AssbXnG6wfhRsntNIINCZLbBnjXOyz6ZHIC5K4tSTdcnWaiYPeRPQmnw9UUvHAcNU2yMWsy0eU377yDS0WstTxOdQutTdkczl8kv5Lo26JiEK7mSIuRK19ffF9Zz8FG8+eKv5zdyIPjyQRDYBysUoDv5huKe2eoxJu/MWS2Pql/ZtUGeD6Ozm3mCvh0vQ9ceagBkY6Ocm3du0ziAKP29Ri0mjg4DizVorbLzsh+EQH/s2Pi9MnjUZDlEmuLl2Xfp7/w4j/8u0N0tVR70VDFuGdKpTjFY3vS8EJrPtyMTM51x1D9rb8gIql8aR/rJw4YF+huxg1mv5n6+tGVqg5msbPmF12eJijP4lkmaRwIpLW5pJTtaDkUj7uOeu1mm4k+Dt5nh0/0jPHzrv6bcTCcbV7UjMHDoTXXqEpFAAJ66rHR7zdAJu+YKsnTIZyLmOpcowq7LL8G9qTvV0OSpyQWUIavRSgbDHFqEqRs+JU94jAzkq8nCY5MTd9m5sIv9InfdT3k+pwpsE/FKge8nghFLtbUrafGkzTky8SE2druvVcIvbfXMfLIKRUYjJgnWc0gQzF5J6pzXM7D2r/RG6JDzASqjlbURq6v9bhNerlOVdMujWKEEVcKWIzlbt4RkihRjM8AUqIZQOyicGQ+4yfIjAHw5viuABONYs3OIWULnFqJxdvS9rNKhfxSjIq9cfqyzevq2xrRoMXEonobh6M3bD2Vang8OAeVeD1OXWPERi4pepCYFS9RJ/Xa/UWxptsqSNuGcb3fAzQSmLpXLGdWRoKXvSe7EYgc0bGcLOjSTu5RURKo+EF9i4KT9EJauf6VXw5dTf/CCIJRXE1bWzXhSCFYntohYhX2ldOCDYpi/jFBC6Vtkw0ud3/xq8Nmhd5gUk+SpngByCZH3Pm3H+jvlbMpiqkDkm1v74hDX13Xhrcw2eWyuqKBVoRCCniUvwpYNbGvBfjC6Hcizv0Aybciwj+4nybt5EPoEUm6S6Gs7fG7QpPdvrzpAxX70MlmdkF/gwyuhbEeJhLK+WL7qAsN5CvHPzVbsIf90x+nGTtMJPgpxVr0tJMj+vprXV4WxutfARBiOnqe58MhA857sd+MzKBgKnoLOBRTiC3qc/0/ULwbG2HCCD7nmwzz7M4nUuMvo8rgS7z0BF68OClT8X3JwSXbL5Wg=="),
|
||||
},
|
||||
}
|
||||
|
||||
for _, kv := range keyValues {
|
||||
if err := tx.Create(&kv).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
@@ -424,8 +475,9 @@ func (s *TestService) ResetDatabase() error {
|
||||
}
|
||||
|
||||
func (s *TestService) ResetApplicationImages(ctx context.Context) error {
|
||||
if err := os.RemoveAll(common.EnvConfig.UploadPath); err != nil {
|
||||
slog.ErrorContext(ctx, "Error removing directory", slog.Any("error", err))
|
||||
err := s.fileStorage.DeleteAll(ctx, "/")
|
||||
if err != nil {
|
||||
slog.ErrorContext(ctx, "Error removing uploads", slog.Any("error", err))
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -435,13 +487,20 @@ func (s *TestService) ResetApplicationImages(ctx context.Context) error {
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
srcFilePath := filepath.Join("images", file.Name())
|
||||
destFilePath := filepath.Join(common.EnvConfig.UploadPath, "application-images", file.Name())
|
||||
|
||||
err := utils.CopyEmbeddedFileToDisk(srcFilePath, destFilePath)
|
||||
if file.IsDir() {
|
||||
continue
|
||||
}
|
||||
srcFilePath := path.Join("images", file.Name())
|
||||
srcFile, err := resources.FS.Open(srcFilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = s.fileStorage.Save(ctx, path.Join("application-images", file.Name()), srcFile)
|
||||
if err != nil {
|
||||
srcFile.Close()
|
||||
return err
|
||||
}
|
||||
srcFile.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -454,47 +513,29 @@ func (s *TestService) ResetAppConfig(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Manually set instance ID
|
||||
err = s.appConfigService.UpdateAppConfigValues(ctx, "instanceId", "test-instance-id")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Reload the app config from the database after resetting the values
|
||||
return s.appConfigService.LoadDbConfig(ctx)
|
||||
err = s.appConfigService.LoadDbConfig(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Reload the JWK
|
||||
if err := s.jwtService.LoadOrGenerateKey(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *TestService) SetJWTKeys() {
|
||||
const privateKeyString = `{"alg":"RS256","d":"mvMDWSdPPvcum0c0iEHE2gbqtV2NKMmLwrl9E6K7g8lTV95SePLnW_bwyMPV7EGp7PQk3l17I5XRhFjze7GqTnFIOgKzMianPs7jv2ELtBMGK0xOPATgu1iGb70xZ6vcvuEfRyY3dJ0zr4jpUdVuXwKmx9rK4IdZn2dFCKfvSuspqIpz11RhF1ALrqDLkxGVv7ZwNh0_VhJZU9hcjG5l6xc7rQEKpPRkZp0IdjkGS8Z0FskoVaiRIWAbZuiVFB9WCW8k1czC4HQTPLpII01bUQx2ludbm0UlXRgVU9ptUUbU7GAImQqTOW8LfPGklEvcgzlIlR_oqw4P9yBxLi-yMQ","dp":"pvNCSnnhbo8Igw9psPR-DicxFnkXlu_ix4gpy6efTrxA-z1VDFDioJ814vKQNioYDzpyAP1gfMPhRkvG_q0hRZsJah3Sb9dfA-WkhSWY7lURQP4yIBTMU0PF_rEATuS7lRciYk1SOx5fqXZd3m_LP0vpBC4Ujlq6NAq6CIjCnms","dq":"TtUVGCCkPNgfOLmkYXu7dxxUCV5kB01-xAEK2OY0n0pG8vfDophH4_D_ZC7nvJ8J9uDhs_3JStexq1lIvaWtG99RNTChIEDzpdn6GH9yaVcb_eB4uJjrNm64FhF8PGCCwxA-xMCZMaARKwhMB2_IOMkxUbWboL3gnhJ2rDO_QO0","e":"AQAB","kid":"8uHDw3M6rf8","kty":"RSA","n":"yaeEL0VKoPBXIAaWXsUgmu05lAvEIIdJn0FX9lHh4JE5UY9B83C5sCNdhs9iSWzpeP11EVjWp8i3Yv2CF7c7u50BXnVBGtxpZpFC-585UXacoJ0chUmarL9GRFJcM1nPHBTFu68aRrn1rIKNHUkNaaxFo0NFGl_4EDDTO8HwawTjwkPoQlRzeByhlvGPVvwgB3Fn93B8QJ_cZhXKxJvjjrC_8Pk76heC_ntEMru71Ix77BoC3j2TuyiN7m9RNBW8BU5q6lKoIdvIeZfTFLzi37iufyfvMrJTixp9zhNB1NxlLCeOZl2MXegtiGqd2H3cbAyqoOiv9ihUWTfXj7SxJw","p":"_Yylc9e07CKdqNRD2EosMC2mrhrEa9j5oY_l00Qyy4-jmCA59Q9viyqvveRo0U7cRvFA5BWgWN6GGLh1DG3X-QBqVr0dnk3uzbobb55RYUXyPLuBZI2q6w2oasbiDwPdY7KpkVv_H-bpITQlyDvO8hhucA6rUV7F6KTQVz8M3Ms","q":"y5p3hch-7jJ21TkAhp_Vk1fLCAuD4tbErwQs2of9ja8sB4iJOs5Wn6HD3P7Mc8Plye7qaLHvzc8I5g0tPKWvC0DPd_FLPXiWwMVAzee3NUX_oGeJNOQp11y1w_KqdO9qZqHSEPZ3NcFL_SZMFgggxhM1uzRiPzsVN0lnD_6prZU","qi":"2Grt6uXHm61ji3xSdkBWNtUnj19vS1-7rFJp5SoYztVQVThf_W52BAiXKBdYZDRVoItC_VS2NvAOjeJjhYO_xQ_q3hK7MdtuXfEPpLnyXKkmWo3lrJ26wbeF6l05LexCkI7ShsOuSt-dsyaTJTszuKDIA6YOfWvfo3aVZmlWRaI","use":"sig"}`
|
||||
|
||||
privateKey, _ := jwk.ParseKey([]byte(privateKeyString))
|
||||
_ = s.jwtService.SetKey(privateKey)
|
||||
}
|
||||
|
||||
// getCborPublicKey decodes a Base64 encoded public key and returns the CBOR encoded COSE key
|
||||
func (s *TestService) getCborPublicKey(base64PublicKey string) ([]byte, error) {
|
||||
decodedKey, err := base64.StdEncoding.DecodeString(base64PublicKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode base64 key: %w", err)
|
||||
}
|
||||
pubKey, err := x509.ParsePKIXPublicKey(decodedKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse public key: %w", err)
|
||||
}
|
||||
|
||||
ecdsaPubKey, ok := pubKey.(*ecdsa.PublicKey)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("not an ECDSA public key")
|
||||
}
|
||||
|
||||
coseKey := map[int]interface{}{
|
||||
1: 2, // Key type: EC2
|
||||
3: -7, // Algorithm: ECDSA with SHA-256
|
||||
-1: 1, // Curve: P-256
|
||||
-2: ecdsaPubKey.X.Bytes(), // X coordinate
|
||||
-3: ecdsaPubKey.Y.Bytes(), // Y coordinate
|
||||
}
|
||||
|
||||
cborPublicKey, err := cbor.Marshal(coseKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal COSE key: %w", err)
|
||||
}
|
||||
|
||||
return cborPublicKey, nil
|
||||
func (s *TestService) ResetLock(ctx context.Context) error {
|
||||
_, err := s.appLockService.Acquire(ctx, true)
|
||||
return err
|
||||
}
|
||||
|
||||
// SyncLdap triggers an LDAP synchronization
|
||||
@@ -521,7 +562,7 @@ func (s *TestService) SetLdapTestConfig(ctx context.Context) error {
|
||||
"ldapAttributeGroupUniqueIdentifier": "uuid",
|
||||
"ldapAttributeGroupName": "uid",
|
||||
"ldapAttributeGroupMember": "member",
|
||||
"ldapAttributeAdminGroup": "admin_group",
|
||||
"ldapAdminGroupName": "admin_group",
|
||||
"ldapSoftDeleteUsers": "true",
|
||||
"ldapEnabled": "true",
|
||||
}
|
||||
|
||||
@@ -78,7 +78,7 @@ func SendEmail[V any](ctx context.Context, srv *EmailService, toEmail email.Addr
|
||||
|
||||
data := &email.TemplateData[V]{
|
||||
AppName: dbConfig.AppName.Value,
|
||||
LogoURL: common.EnvConfig.AppURL + "/api/application-images/logo",
|
||||
LogoURL: common.EnvConfig.AppURL + "/api/application-images/email",
|
||||
Data: tData,
|
||||
}
|
||||
|
||||
@@ -282,16 +282,18 @@ func prepareBody[V any](srv *EmailService, template email.Template[V], data *ema
|
||||
|
||||
var htmlHeader = textproto.MIMEHeader{}
|
||||
htmlHeader.Add("Content-Type", "text/html; charset=UTF-8")
|
||||
htmlHeader.Add("Content-Transfer-Encoding", "8bit")
|
||||
htmlHeader.Add("Content-Transfer-Encoding", "quoted-printable")
|
||||
htmlPart, err := mpart.CreatePart(htmlHeader)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("create html part: %w", err)
|
||||
}
|
||||
|
||||
err = email.GetTemplate(srv.htmlTemplates, template).ExecuteTemplate(htmlPart, "root", data)
|
||||
htmlQp := quotedprintable.NewWriter(htmlPart)
|
||||
err = email.GetTemplate(srv.htmlTemplates, template).ExecuteTemplate(htmlQp, "root", data)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("execute html template: %w", err)
|
||||
}
|
||||
htmlQp.Close()
|
||||
|
||||
err = mpart.Close()
|
||||
if err != nil {
|
||||
|
||||
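The hunk above switches the HTML part from an 8bit transfer encoding to quoted-printable and wraps the template output in an encoder that must be closed before the multipart writer. A minimal self-contained sketch of the same pattern using only the standard library (mime/multipart and mime/quotedprintable); the template call is replaced by a plain Fprint for brevity.

package main

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"mime/quotedprintable"
	"net/textproto"
)

func main() {
	var body bytes.Buffer
	mw := multipart.NewWriter(&body)

	hdr := textproto.MIMEHeader{}
	hdr.Add("Content-Type", "text/html; charset=UTF-8")
	hdr.Add("Content-Transfer-Encoding", "quoted-printable")

	part, err := mw.CreatePart(hdr)
	if err != nil {
		panic(err)
	}

	// Wrap the part in a quoted-printable encoder, as the change above does
	// for the executed HTML template.
	qp := quotedprintable.NewWriter(part)
	fmt.Fprint(qp, "<p>Grüße aus Pocket ID</p>")
	// Close the encoder first so buffered bytes are flushed into the part...
	qp.Close()
	// ...then close the multipart writer to emit the final boundary.
	mw.Close()

	fmt.Println(body.String())
}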
backend/internal/service/export_service.go (new file, 217 lines)
@@ -0,0 +1,217 @@
package service

import (
	"archive/zip"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"path/filepath"

	"gorm.io/gorm"

	datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
	"github.com/pocket-id/pocket-id/backend/internal/storage"
	"github.com/pocket-id/pocket-id/backend/internal/utils"
)

// ExportService handles exporting Pocket ID data into a ZIP archive.
type ExportService struct {
	db      *gorm.DB
	storage storage.FileStorage
}

func NewExportService(db *gorm.DB, storage storage.FileStorage) *ExportService {
	return &ExportService{
		db:      db,
		storage: storage,
	}
}

// ExportToZip performs the full export process and writes the ZIP data to the given writer.
func (s *ExportService) ExportToZip(ctx context.Context, w io.Writer) error {
	dbData, err := s.extractDatabase()
	if err != nil {
		return err
	}

	return s.writeExportZipStream(ctx, w, dbData)
}

// extractDatabase reads all tables into a DatabaseExport struct
func (s *ExportService) extractDatabase() (DatabaseExport, error) {
	schema, err := utils.LoadDBSchemaTypes(s.db)
	if err != nil {
		return DatabaseExport{}, fmt.Errorf("failed to load schema types: %w", err)
	}

	version, err := s.schemaVersion()
	if err != nil {
		return DatabaseExport{}, err
	}

	out := DatabaseExport{
		Provider: s.db.Name(),
		Version:  version,
		Tables:   map[string][]map[string]any{},
		// These tables need to be inserted in a specific order because of foreign key constraints
		// Not all tables are listed here, because not all tables are order-dependent
		TableOrder: []string{"users", "user_groups", "oidc_clients", "signup_tokens"},
	}

	for table := range schema {
		if table == "storage" || table == "schema_migrations" {
			continue
		}
		err = s.dumpTable(table, schema[table], &out)
		if err != nil {
			return DatabaseExport{}, err
		}
	}

	return out, nil
}

func (s *ExportService) schemaVersion() (uint, error) {
	var version uint
	if err := s.db.Raw("SELECT version FROM schema_migrations").Row().Scan(&version); err != nil {
		return 0, fmt.Errorf("failed to query schema version: %w", err)
	}
	return version, nil
}

// dumpTable selects all rows from a table and appends them to out.Tables
func (s *ExportService) dumpTable(table string, types utils.DBSchemaTableTypes, out *DatabaseExport) error {
	rows, err := s.db.Raw("SELECT * FROM " + table).Rows()
	if err != nil {
		return fmt.Errorf("failed to read table %s: %w", table, err)
	}
	defer rows.Close()

	cols, _ := rows.Columns()
	if len(cols) != len(types) {
		// Should never happen...
		return fmt.Errorf("mismatched columns in table (%d) and schema (%d)", len(cols), len(types))
	}

	for rows.Next() {
		vals := s.getScanValuesForTable(cols, types)
		err = rows.Scan(vals...)
		if err != nil {
			return fmt.Errorf("failed to scan row in table %s: %w", table, err)
		}

		rowMap := make(map[string]any, len(cols))
		for i, col := range cols {
			rowMap[col] = vals[i]
		}

		// Skip the app lock row in the kv table
		if table == "kv" {
			if keyPtr, ok := rowMap["key"].(*string); ok && keyPtr != nil && *keyPtr == lockKey {
				continue
			}
		}

		out.Tables[table] = append(out.Tables[table], rowMap)
	}

	return rows.Err()
}

func (s *ExportService) getScanValuesForTable(cols []string, types utils.DBSchemaTableTypes) []any {
	res := make([]any, len(cols))
	for i, col := range cols {
		// Store a pointer
		// Note: don't create a helper function for this switch, because it would return type "any" and mess everything up
		// If the column is nullable, we need a pointer to a pointer!
		switch types[col].Name {
		case "boolean", "bool":
			var x bool
			if types[col].Nullable {
				res[i] = utils.Ptr(utils.Ptr(x))
			} else {
				res[i] = utils.Ptr(x)
			}
		case "blob", "bytea", "jsonb":
			// Treat jsonb columns as binary too
			var x []byte
			if types[col].Nullable {
				res[i] = utils.Ptr(utils.Ptr(x))
			} else {
				res[i] = utils.Ptr(x)
			}
		case "timestamp", "timestamptz", "timestamp with time zone", "datetime":
			var x datatype.DateTime
			if types[col].Nullable {
				res[i] = utils.Ptr(utils.Ptr(x))
			} else {
				res[i] = utils.Ptr(x)
			}
		case "integer", "int", "bigint":
			var x int64
			if types[col].Nullable {
				res[i] = utils.Ptr(utils.Ptr(x))
			} else {
				res[i] = utils.Ptr(x)
			}
		default:
			// Treat everything else as a string (including the "numeric" type)
			var x string
			if types[col].Nullable {
				res[i] = utils.Ptr(utils.Ptr(x))
			} else {
				res[i] = utils.Ptr(x)
			}
		}
	}

	return res
}

func (s *ExportService) writeExportZipStream(ctx context.Context, w io.Writer, dbData DatabaseExport) error {
	zipWriter := zip.NewWriter(w)

	// Add database.json
	jsonWriter, err := zipWriter.Create("database.json")
	if err != nil {
		return fmt.Errorf("failed to create database.json in zip: %w", err)
	}

	jsonEncoder := json.NewEncoder(jsonWriter)
	jsonEncoder.SetEscapeHTML(false)

	if err := jsonEncoder.Encode(dbData); err != nil {
		return fmt.Errorf("failed to encode database.json: %w", err)
	}

	// Add uploaded files
	if err := s.addUploadsToZip(ctx, zipWriter); err != nil {
		return err
	}

	return zipWriter.Close()
}

// addUploadsToZip adds all files from the storage to the ZIP archive under the "uploads/" directory
func (s *ExportService) addUploadsToZip(ctx context.Context, zipWriter *zip.Writer) error {
	return s.storage.Walk(ctx, "/", func(p storage.ObjectInfo) error {
		zipPath := filepath.Join("uploads", p.Path)

		w, err := zipWriter.Create(zipPath)
		if err != nil {
			return fmt.Errorf("failed to create zip entry for %s: %w", zipPath, err)
		}

		f, _, err := s.storage.Open(ctx, p.Path)
		if err != nil {
			return fmt.Errorf("failed to open file %s: %w", zipPath, err)
		}
		defer f.Close()

		if _, err := io.Copy(w, f); err != nil {
			return fmt.Errorf("failed to copy file %s into zip: %w", zipPath, err)
		}
		return nil
	})
}
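Because ExportToZip writes to a plain io.Writer, the archive can be streamed to a file, an HTTP response, or anything else. A sketch of writing the export to a file, assuming an already-constructed ExportService; how the service is built (DB handle, storage backend) is not shown here.

package sketch

import (
	"context"
	"io"
	"os"
)

// exporter matches the ExportToZip signature above; a constructed *ExportService satisfies it.
type exporter interface {
	ExportToZip(ctx context.Context, w io.Writer) error
}

func exportToFile(ctx context.Context, e exporter, path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	// The ZIP (database.json plus the uploads/ tree) is streamed straight to the file.
	return e.ExportToZip(ctx, f)
}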
backend/internal/service/import_service.go (new file, 272 lines)
@@ -0,0 +1,272 @@
package service
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"gorm.io/gorm"
|
||||
|
||||
datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/storage"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/utils"
|
||||
)
|
||||
|
||||
// ImportService handles importing Pocket ID data from an exported ZIP archive.
|
||||
type ImportService struct {
|
||||
db *gorm.DB
|
||||
storage storage.FileStorage
|
||||
}
|
||||
|
||||
type DatabaseExport struct {
|
||||
Provider string `json:"provider"`
|
||||
Version uint `json:"version"`
|
||||
Tables map[string][]map[string]any `json:"tables"`
|
||||
TableOrder []string `json:"tableOrder"`
|
||||
}
|
||||
|
||||
func NewImportService(db *gorm.DB, storage storage.FileStorage) *ImportService {
|
||||
return &ImportService{
|
||||
db: db,
|
||||
storage: storage,
|
||||
}
|
||||
}
|
||||
|
||||
// ImportFromZip performs the full import process from the given ZIP reader.
|
||||
func (s *ImportService) ImportFromZip(ctx context.Context, r *zip.Reader) error {
|
||||
dbData, err := processZipDatabaseJson(r.File)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.ImportDatabase(dbData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.importUploads(ctx, r.File)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ImportDatabase only imports the database data from the given DatabaseExport struct.
|
||||
func (s *ImportService) ImportDatabase(dbData DatabaseExport) error {
|
||||
err := s.resetSchema(dbData.Version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = s.insertData(dbData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// processZipDatabaseJson extracts database.json from the ZIP archive
|
||||
func processZipDatabaseJson(files []*zip.File) (dbData DatabaseExport, err error) {
|
||||
for _, f := range files {
|
||||
if f.Name == "database.json" {
|
||||
return parseDatabaseJsonStream(f)
|
||||
}
|
||||
}
|
||||
return dbData, errors.New("database.json not found in the ZIP file")
|
||||
}
|
||||
|
||||
func parseDatabaseJsonStream(f *zip.File) (dbData DatabaseExport, err error) {
|
||||
rc, err := f.Open()
|
||||
if err != nil {
|
||||
return dbData, fmt.Errorf("failed to open database.json: %w", err)
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
err = json.NewDecoder(rc).Decode(&dbData)
|
||||
if err != nil {
|
||||
return dbData, fmt.Errorf("failed to decode database.json: %w", err)
|
||||
}
|
||||
|
||||
return dbData, nil
|
||||
}
|
||||
|
||||
// importUploads imports files from the uploads/ directory in the ZIP archive
|
||||
func (s *ImportService) importUploads(ctx context.Context, files []*zip.File) error {
|
||||
const maxFileSize = 50 << 20 // 50 MiB
|
||||
const uploadsPrefix = "uploads/"
|
||||
|
||||
for _, f := range files {
|
||||
if !strings.HasPrefix(f.Name, uploadsPrefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
if f.UncompressedSize64 > maxFileSize {
|
||||
return fmt.Errorf("file %s too large (%d bytes)", f.Name, f.UncompressedSize64)
|
||||
}
|
||||
|
||||
targetPath := strings.TrimPrefix(f.Name, uploadsPrefix)
|
||||
if strings.HasSuffix(f.Name, "/") || targetPath == "" {
|
||||
continue // Skip directories
|
||||
}
|
||||
|
||||
err := s.storage.DeleteAll(ctx, targetPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete existing file %s: %w", targetPath, err)
|
||||
}
|
||||
|
||||
rc, err := f.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf, err := io.ReadAll(rc)
|
||||
rc.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("read file %s: %w", f.Name, err)
|
||||
}
|
||||
|
||||
err = s.storage.Save(ctx, targetPath, bytes.NewReader(buf))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to save file %s: %w", targetPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// resetSchema drops the existing schema and migrates to the target version
|
||||
func (s *ImportService) resetSchema(targetVersion uint) error {
|
||||
sqlDb, err := s.db.DB()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get sql.DB: %w", err)
|
||||
}
|
||||
|
||||
m, err := utils.GetEmbeddedMigrateInstance(sqlDb)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get migrate instance: %w", err)
|
||||
}
|
||||
|
||||
if s.db.Name() == "sqlite" {
|
||||
s.db.Exec("PRAGMA foreign_keys = OFF;")
|
||||
}
|
||||
|
||||
err = m.Drop()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to drop existing schema: %w", err)
|
||||
}
|
||||
|
||||
if s.db.Name() == "sqlite" {
|
||||
defer s.db.Exec("PRAGMA foreign_keys = ON;")
|
||||
}
|
||||
|
||||
// Needs to be called again to re-create the schema_migrations table
|
||||
m, err = utils.GetEmbeddedMigrateInstance(sqlDb)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get migrate instance: %w", err)
|
||||
}
|
||||
|
||||
err = m.Migrate(targetVersion)
|
||||
if err != nil {
|
||||
return fmt.Errorf("migration failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// insertData populates the DB with the imported data
|
||||
func (s *ImportService) insertData(dbData DatabaseExport) error {
|
||||
schema, err := utils.LoadDBSchemaTypes(s.db)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load schema types: %w", err)
|
||||
}
|
||||
|
||||
return s.db.Transaction(func(tx *gorm.DB) error {
|
||||
// Iterate through all tables
|
||||
// Some tables need to be processed in order
|
||||
tables := make([]string, 0, len(dbData.Tables))
|
||||
tables = append(tables, dbData.TableOrder...)
|
||||
|
||||
for t := range dbData.Tables {
|
||||
// Skip tables already present where the order matters
|
||||
// Also skip the schema_migrations table
|
||||
if slices.Contains(dbData.TableOrder, t) || t == "schema_migrations" {
|
||||
continue
|
||||
}
|
||||
tables = append(tables, t)
|
||||
}
|
||||
|
||||
// Insert rows
|
||||
for _, table := range tables {
|
||||
for _, row := range dbData.Tables[table] {
|
||||
err = normalizeRowWithSchema(row, table, schema)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to normalize row for table '%s': %w", table, err)
|
||||
}
|
||||
err = tx.Table(table).Create(row).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed inserting into table '%s': %w", table, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// normalizeRowWithSchema converts row values based on the DB schema
|
||||
func normalizeRowWithSchema(row map[string]any, table string, schema utils.DBSchemaTypes) error {
|
||||
if schema[table] == nil {
|
||||
return fmt.Errorf("schema not found for table '%s'", table)
|
||||
}
|
||||
|
||||
for col, val := range row {
|
||||
if val == nil {
|
||||
// If the value is nil, skip the column
|
||||
continue
|
||||
}
|
||||
|
||||
colType := schema[table][col]
|
||||
|
||||
switch colType.Name {
|
||||
case "timestamp", "timestamptz", "timestamp with time zone", "datetime":
|
||||
// Dates are stored as strings
|
||||
str, ok := val.(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("value for column '%s/%s' was expected to be a string, but was '%T'", table, col, val)
|
||||
}
|
||||
d, err := datatype.DateTimeFromString(str)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decode value for column '%s/%s' as timestamp: %w", table, col, err)
|
||||
}
|
||||
row[col] = d
|
||||
|
||||
case "blob", "bytea", "jsonb":
|
||||
// Binary data and jsonb data is stored in the file as base64-encoded string
|
||||
str, ok := val.(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("value for column '%s/%s' was expected to be a string, but was '%T'", table, col, val)
|
||||
}
|
||||
b, err := base64.StdEncoding.DecodeString(str)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to decode value for column '%s/%s' from base64: %w", table, col, err)
|
||||
}
|
||||
|
||||
// For jsonb, we additionally cast to json.RawMessage
|
||||
if colType.Name == "jsonb" {
|
||||
row[col] = json.RawMessage(b)
|
||||
} else {
|
||||
row[col] = b
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
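A sketch of driving an import from an exported archive on disk, assuming an already-constructed ImportService. ImportFromZip takes a *zip.Reader, so the standard library's zip.OpenReader can supply it directly via the embedded Reader.

package sketch

import (
	"archive/zip"
	"context"
)

// importer matches the ImportFromZip signature above; a constructed *ImportService satisfies it.
type importer interface {
	ImportFromZip(ctx context.Context, r *zip.Reader) error
}

func importFromFile(ctx context.Context, i importer, path string) error {
	rc, err := zip.OpenReader(path)
	if err != nil {
		return err
	}
	defer rc.Close()

	// ImportFromZip reads database.json, resets the schema to the exported
	// version, inserts the rows, and restores the files under uploads/.
	return i.ImportFromZip(ctx, &rc.Reader)
}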
|
||||
@@ -18,14 +18,6 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
// PrivateKeyFile is the path in the data/keys folder where the key is stored
|
||||
// This is a JSON file containing a key encoded as JWK
|
||||
PrivateKeyFile = "jwt_private_key.json"
|
||||
|
||||
// PrivateKeyFileEncrypted is the path in the data/keys folder where the encrypted key is stored
|
||||
// This is a encrypted JSON file containing a key encoded as JWK
|
||||
PrivateKeyFileEncrypted = "jwt_private_key.json.enc"
|
||||
|
||||
// KeyUsageSigning is the usage for the private keys, for the "use" property
|
||||
KeyUsageSigning = "sig"
|
||||
|
||||
@@ -56,6 +48,7 @@ const (
|
||||
)
|
||||
|
||||
type JwtService struct {
|
||||
db *gorm.DB
|
||||
envConfig *common.EnvConfigSchema
|
||||
privateKey jwk.Key
|
||||
keyId string
|
||||
@@ -66,7 +59,6 @@ type JwtService struct {
|
||||
func NewJwtService(db *gorm.DB, appConfigService *AppConfigService) (*JwtService, error) {
|
||||
service := &JwtService{}
|
||||
|
||||
// Ensure keys are generated or loaded
|
||||
err := service.init(db, appConfigService, &common.EnvConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -78,14 +70,15 @@ func NewJwtService(db *gorm.DB, appConfigService *AppConfigService) (*JwtService
|
||||
func (s *JwtService) init(db *gorm.DB, appConfigService *AppConfigService, envConfig *common.EnvConfigSchema) (err error) {
|
||||
s.appConfigService = appConfigService
|
||||
s.envConfig = envConfig
|
||||
s.db = db
|
||||
|
||||
// Ensure keys are generated or loaded
|
||||
return s.loadOrGenerateKey(db)
|
||||
return s.LoadOrGenerateKey()
|
||||
}
|
||||
|
||||
func (s *JwtService) loadOrGenerateKey(db *gorm.DB) error {
|
||||
func (s *JwtService) LoadOrGenerateKey() error {
|
||||
// Get the key provider
|
||||
keyProvider, err := jwkutils.GetKeyProvider(db, s.envConfig, s.appConfigService.GetDbConfig().InstanceID.Value)
|
||||
keyProvider, err := jwkutils.GetKeyProvider(s.db, s.envConfig, s.appConfigService.GetDbConfig().InstanceID.Value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get key provider: %w", err)
|
||||
}
|
||||
@@ -93,7 +86,7 @@ func (s *JwtService) loadOrGenerateKey(db *gorm.DB) error {
|
||||
// Try loading a key
|
||||
key, err := keyProvider.LoadKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load key (provider type '%s'): %w", s.envConfig.KeysStorage, err)
|
||||
return fmt.Errorf("failed to load key: %w", err)
|
||||
}
|
||||
|
||||
// If we have a key, store it in the object and we're done
|
||||
@@ -114,7 +107,7 @@ func (s *JwtService) loadOrGenerateKey(db *gorm.DB) error {
|
||||
// Save the newly-generated key
|
||||
err = keyProvider.SaveKey(s.privateKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to save private key (provider type '%s'): %w", s.envConfig.KeysStorage, err)
|
||||
return fmt.Errorf("failed to save private key: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
File diff suppressed because it is too large
@@ -11,12 +11,14 @@ import (
|
||||
"log/slog"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/go-ldap/ldap/v3"
|
||||
"github.com/google/uuid"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/storage"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/utils"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
"gorm.io/gorm"
|
||||
@@ -32,15 +34,23 @@ type LdapService struct {
|
||||
appConfigService *AppConfigService
|
||||
userService *UserService
|
||||
groupService *UserGroupService
|
||||
fileStorage storage.FileStorage
|
||||
}
|
||||
|
||||
func NewLdapService(db *gorm.DB, httpClient *http.Client, appConfigService *AppConfigService, userService *UserService, groupService *UserGroupService) *LdapService {
|
||||
type savePicture struct {
|
||||
userID string
|
||||
username string
|
||||
picture string
|
||||
}
|
||||
|
||||
func NewLdapService(db *gorm.DB, httpClient *http.Client, appConfigService *AppConfigService, userService *UserService, groupService *UserGroupService, fileStorage storage.FileStorage) *LdapService {
|
||||
return &LdapService{
|
||||
db: db,
|
||||
httpClient: httpClient,
|
||||
appConfigService: appConfigService,
|
||||
userService: userService,
|
||||
groupService: groupService,
|
||||
fileStorage: fileStorage,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,12 +78,6 @@ func (s *LdapService) createClient() (*ldap.Conn, error) {
|
||||
}
|
||||
|
||||
func (s *LdapService) SyncAll(ctx context.Context) error {
|
||||
// Start a transaction
|
||||
tx := s.db.Begin()
|
||||
defer func() {
|
||||
tx.Rollback()
|
||||
}()
|
||||
|
||||
// Setup LDAP connection
|
||||
client, err := s.createClient()
|
||||
if err != nil {
|
||||
@@ -81,7 +85,13 @@ func (s *LdapService) SyncAll(ctx context.Context) error {
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
err = s.SyncUsers(ctx, tx, client)
|
||||
// Start a transaction
|
||||
tx := s.db.Begin()
|
||||
defer func() {
|
||||
tx.Rollback()
|
||||
}()
|
||||
|
||||
savePictures, deleteFiles, err := s.SyncUsers(ctx, tx, client)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to sync users: %w", err)
|
||||
}
|
||||
@@ -97,6 +107,25 @@ func (s *LdapService) SyncAll(ctx context.Context) error {
|
||||
return fmt.Errorf("failed to commit changes to database: %w", err)
|
||||
}
|
||||
|
||||
// Now that we've committed the transaction, we can perform operations on the storage layer
|
||||
// First, save all new pictures
|
||||
for _, sp := range savePictures {
|
||||
err = s.saveProfilePicture(ctx, sp.userID, sp.picture)
|
||||
if err != nil {
|
||||
// This is not a fatal error
|
||||
slog.Warn("Error saving profile picture for LDAP user", slog.String("username", sp.username), slog.Any("error", err))
|
||||
}
|
||||
}
|
||||
|
||||
// Delete all old files
|
||||
for _, path := range deleteFiles {
|
||||
err = s.fileStorage.Delete(ctx, path)
|
||||
if err != nil {
|
||||
// This is not a fatal error
|
||||
slog.Error("Failed to delete file after LDAP sync", slog.String("path", path), slog.Any("error", err))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -266,7 +295,7 @@ func (s *LdapService) SyncGroups(ctx context.Context, tx *gorm.DB, client *ldap.
|
||||
}
|
||||
|
||||
//nolint:gocognit
|
||||
func (s *LdapService) SyncUsers(ctx context.Context, tx *gorm.DB, client *ldap.Conn) error {
|
||||
func (s *LdapService) SyncUsers(ctx context.Context, tx *gorm.DB, client *ldap.Conn) (savePictures []savePicture, deleteFiles []string, err error) {
|
||||
dbConfig := s.appConfigService.GetDbConfig()
|
||||
|
||||
searchAttrs := []string{
|
||||
@@ -294,11 +323,12 @@ func (s *LdapService) SyncUsers(ctx context.Context, tx *gorm.DB, client *ldap.C
|
||||
|
||||
result, err := client.Search(searchReq)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query LDAP: %w", err)
|
||||
return nil, nil, fmt.Errorf("failed to query LDAP: %w", err)
|
||||
}
|
||||
|
||||
// Create a mapping for users that exist
|
||||
ldapUserIDs := make(map[string]struct{}, len(result.Entries))
|
||||
savePictures = make([]savePicture, 0, len(result.Entries))
|
||||
|
||||
for _, value := range result.Entries {
|
||||
ldapId := convertLdapIdToString(value.GetAttributeValue(dbConfig.LdapAttributeUserUniqueIdentifier.Value))
|
||||
@@ -329,19 +359,19 @@ func (s *LdapService) SyncUsers(ctx context.Context, tx *gorm.DB, client *ldap.C
|
||||
Error
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to enable user %s: %w", databaseUser.Username, err)
|
||||
return nil, nil, fmt.Errorf("failed to enable user %s: %w", databaseUser.Username, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
// This could error with ErrRecordNotFound and we want to ignore that here
|
||||
return fmt.Errorf("failed to query for LDAP user ID '%s': %w", ldapId, err)
|
||||
return nil, nil, fmt.Errorf("failed to query for LDAP user ID '%s': %w", ldapId, err)
|
||||
}
|
||||
|
||||
// Check if user is admin by checking if they are in the admin group
|
||||
isAdmin := false
|
||||
for _, group := range value.GetAttributeValues("memberOf") {
|
||||
if getDNProperty(dbConfig.LdapAttributeGroupName.Value, group) == dbConfig.LdapAttributeAdminGroup.Value {
|
||||
if getDNProperty(dbConfig.LdapAttributeGroupName.Value, group) == dbConfig.LdapAdminGroupName.Value {
|
||||
isAdmin = true
|
||||
break
|
||||
}
|
||||
@@ -369,32 +399,35 @@ func (s *LdapService) SyncUsers(ctx context.Context, tx *gorm.DB, client *ldap.C
|
||||
continue
|
||||
}
|
||||
|
||||
userID := databaseUser.ID
|
||||
if databaseUser.ID == "" {
|
||||
_, err = s.userService.createUserInternal(ctx, newUser, true, tx)
|
||||
createdUser, err := s.userService.createUserInternal(ctx, newUser, true, tx)
|
||||
if errors.Is(err, &common.AlreadyInUseError{}) {
|
||||
slog.Warn("Skipping creating LDAP user", slog.String("username", newUser.Username), slog.Any("error", err))
|
||||
continue
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("error creating user '%s': %w", newUser.Username, err)
|
||||
return nil, nil, fmt.Errorf("error creating user '%s': %w", newUser.Username, err)
|
||||
}
|
||||
userID = createdUser.ID
|
||||
} else {
|
||||
_, err = s.userService.updateUserInternal(ctx, databaseUser.ID, newUser, false, true, tx)
|
||||
if errors.Is(err, &common.AlreadyInUseError{}) {
|
||||
slog.Warn("Skipping updating LDAP user", slog.String("username", newUser.Username), slog.Any("error", err))
|
||||
continue
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("error updating user '%s': %w", newUser.Username, err)
|
||||
return nil, nil, fmt.Errorf("error updating user '%s': %w", newUser.Username, err)
|
||||
}
|
||||
}
|
||||
|
||||
		// Save profile picture
		pictureString := value.GetAttributeValue(dbConfig.LdapAttributeUserProfilePicture.Value)
		if pictureString != "" {
			err = s.saveProfilePicture(ctx, databaseUser.ID, pictureString)
			if err != nil {
				// This is not a fatal error
				slog.Warn("Error saving profile picture for user", slog.String("username", newUser.Username), slog.Any("error", err))
			}
			// Storage operations must be executed outside of a transaction
			savePictures = append(savePictures, savePicture{
				userID:   userID,
				username: newUser.Username,
				picture:  pictureString,
			})
		}
	}
||||
|
||||
@@ -406,10 +439,11 @@ func (s *LdapService) SyncUsers(ctx context.Context, tx *gorm.DB, client *ldap.C
|
||||
Select("id, username, ldap_id, disabled").
|
||||
Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch users from database: %w", err)
|
||||
return nil, nil, fmt.Errorf("failed to fetch users from database: %w", err)
|
||||
}
|
||||
|
||||
// Mark users as disabled or delete users that no longer exist in LDAP
|
||||
deleteFiles = make([]string, 0, len(ldapUserIDs))
|
||||
for _, user := range ldapUsersInDb {
|
||||
// Skip if the user ID exists in the fetched LDAP results
|
||||
if _, exists := ldapUserIDs[*user.LdapID]; exists {
|
||||
@@ -417,30 +451,34 @@ func (s *LdapService) SyncUsers(ctx context.Context, tx *gorm.DB, client *ldap.C
|
||||
}
|
||||
|
||||
if dbConfig.LdapSoftDeleteUsers.IsTrue() {
|
||||
err = s.userService.disableUserInternal(ctx, user.ID, tx)
|
||||
err = s.userService.disableUserInternal(ctx, tx, user.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to disable user %s: %w", user.Username, err)
|
||||
return nil, nil, fmt.Errorf("failed to disable user %s: %w", user.Username, err)
|
||||
}
|
||||
|
||||
slog.Info("Disabled user", slog.String("username", user.Username))
|
||||
} else {
|
||||
err = s.userService.deleteUserInternal(ctx, user.ID, true, tx)
|
||||
target := &common.LdapUserUpdateError{}
|
||||
if errors.As(err, &target) {
|
||||
return fmt.Errorf("failed to delete user %s: LDAP user must be disabled before deletion", user.Username)
|
||||
} else if err != nil {
|
||||
return fmt.Errorf("failed to delete user %s: %w", user.Username, err)
|
||||
err = s.userService.deleteUserInternal(ctx, tx, user.ID, true)
|
||||
if err != nil {
|
||||
target := &common.LdapUserUpdateError{}
|
||||
if errors.As(err, &target) {
|
||||
return nil, nil, fmt.Errorf("failed to delete user %s: LDAP user must be disabled before deletion", user.Username)
|
||||
}
|
||||
return nil, nil, fmt.Errorf("failed to delete user %s: %w", user.Username, err)
|
||||
}
|
||||
|
||||
slog.Info("Deleted user", slog.String("username", user.Username))
|
||||
|
||||
// Storage operations must be executed outside of a transaction
|
||||
deleteFiles = append(deleteFiles, path.Join("profile-pictures", user.ID+".png"))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return savePictures, deleteFiles, nil
|
||||
}
|
||||
|
||||
func (s *LdapService) saveProfilePicture(parentCtx context.Context, userId string, pictureString string) error {
|
||||
var reader io.Reader
|
||||
var reader io.ReadSeeker
|
||||
|
||||
_, err := url.ParseRequestURI(pictureString)
|
||||
if err == nil {
|
||||
@@ -460,7 +498,12 @@ func (s *LdapService) saveProfilePicture(parentCtx context.Context, userId strin
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
reader = res.Body
|
||||
data, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read profile picture: %w", err)
|
||||
}
|
||||
|
||||
reader = bytes.NewReader(data)
|
||||
} else if decodedPhoto, err := base64.StdEncoding.DecodeString(pictureString); err == nil {
|
||||
// If the photo is a base64 encoded string, decode it
|
||||
reader = bytes.NewReader(decodedPhoto)
|
||||
@@ -470,7 +513,7 @@ func (s *LdapService) saveProfilePicture(parentCtx context.Context, userId strin
|
||||
}
|
||||
|
||||
// Update the profile picture
|
||||
err = s.userService.UpdateProfilePicture(userId, reader)
|
||||
err = s.userService.UpdateProfilePicture(parentCtx, userId, reader)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update profile picture: %w", err)
|
||||
}
|
||||
|
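saveProfilePicture now buffers the HTTP response into memory so it can hand an io.ReadSeeker to UpdateProfilePicture instead of the one-shot response body. A small sketch of that buffering step; fetchAsReadSeeker and the 5 MB cap are illustrative assumptions, not pocket-id API:

package main

import (
    "bytes"
    "fmt"
    "io"
    "net/http"
)

// fetchAsReadSeeker downloads a small resource and returns it as an io.ReadSeeker,
// which callers can rewind (unlike the one-shot resp.Body stream).
func fetchAsReadSeeker(url string) (io.ReadSeeker, error) {
    resp, err := http.Get(url)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()

    data, err := io.ReadAll(io.LimitReader(resp.Body, 5<<20)) // cap at 5 MB for safety
    if err != nil {
        return nil, fmt.Errorf("failed to read profile picture: %w", err)
    }
    return bytes.NewReader(data), nil
}

func main() {
    r, err := fetchAsReadSeeker("https://example.com/avatar.png")
    if err != nil {
        fmt.Println("fetch failed:", err)
        return
    }
    // The reader can now be rewound before image decoding or resizing.
    _, _ = r.Seek(0, io.SeekStart)
}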
||||
@@ -3,6 +3,7 @@ package service
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"crypto/subtle"
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
@@ -11,11 +12,9 @@ import (
|
||||
"io"
|
||||
"log/slog"
|
||||
"mime/multipart"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"regexp"
|
||||
"path"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -33,6 +32,7 @@ import (
|
||||
"github.com/pocket-id/pocket-id/backend/internal/dto"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/model"
|
||||
datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/storage"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/utils"
|
||||
)
|
||||
|
||||
@@ -56,9 +56,11 @@ type OidcService struct {
|
||||
auditLogService *AuditLogService
|
||||
customClaimService *CustomClaimService
|
||||
webAuthnService *WebAuthnService
|
||||
scimService *ScimService
|
||||
|
||||
httpClient *http.Client
|
||||
jwkCache *jwk.Cache
|
||||
httpClient *http.Client
|
||||
jwkCache *jwk.Cache
|
||||
fileStorage storage.FileStorage
|
||||
}
|
||||
|
||||
func NewOidcService(
|
||||
@@ -69,7 +71,9 @@ func NewOidcService(
|
||||
auditLogService *AuditLogService,
|
||||
customClaimService *CustomClaimService,
|
||||
webAuthnService *WebAuthnService,
|
||||
scimService *ScimService,
|
||||
httpClient *http.Client,
|
||||
fileStorage storage.FileStorage,
|
||||
) (s *OidcService, err error) {
|
||||
s = &OidcService{
|
||||
db: db,
|
||||
@@ -78,7 +82,9 @@ func NewOidcService(
|
||||
auditLogService: auditLogService,
|
||||
customClaimService: customClaimService,
|
||||
webAuthnService: webAuthnService,
|
||||
scimService: scimService,
|
||||
httpClient: httpClient,
|
||||
fileStorage: fileStorage,
|
||||
}
|
||||
|
||||
// Note: we don't pass the HTTP Client with OTel instrumented to this because requests are always made in background and not tied to a specific trace
|
||||
@@ -165,7 +171,7 @@ func (s *OidcService) Authorize(ctx context.Context, input dto.AuthorizeOidcClie
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
if !s.IsUserGroupAllowedToAuthorize(user, client) {
|
||||
if !IsUserGroupAllowedToAuthorize(user, client) {
|
||||
return "", "", &common.OidcAccessDeniedError{}
|
||||
}
|
||||
|
||||
@@ -221,8 +227,8 @@ func (s *OidcService) hasAuthorizedClientInternal(ctx context.Context, clientID,
|
||||
}
|
||||
|
||||
// IsUserGroupAllowedToAuthorize checks if the user group of the user is allowed to authorize the client
|
||||
func (s *OidcService) IsUserGroupAllowedToAuthorize(user model.User, client model.OidcClient) bool {
|
||||
if len(client.AllowedUserGroups) == 0 {
|
||||
func IsUserGroupAllowedToAuthorize(user model.User, client model.OidcClient) bool {
|
||||
if !client.IsGroupRestricted {
|
||||
return true
|
||||
}
|
||||
|
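IsUserGroupAllowedToAuthorize becomes a free function gated by the new IsGroupRestricted flag. The remainder of the function is outside this hunk, so the following is only a hypothetical sketch of the group-overlap check it presumably performs; the type and field names are assumptions:

package main

import "fmt"

type userGroup struct{ ID string }

type user struct{ UserGroups []userGroup }

type oidcClient struct {
    IsGroupRestricted bool
    AllowedUserGroups []userGroup
}

// isUserGroupAllowedToAuthorize returns true when the client is unrestricted,
// or when the user shares at least one group with the client's allow-list.
func isUserGroupAllowedToAuthorize(u user, c oidcClient) bool {
    if !c.IsGroupRestricted {
        return true
    }
    allowed := make(map[string]struct{}, len(c.AllowedUserGroups))
    for _, g := range c.AllowedUserGroups {
        allowed[g.ID] = struct{}{}
    }
    for _, g := range u.UserGroups {
        if _, ok := allowed[g.ID]; ok {
            return true
        }
    }
    return false
}

func main() {
    c := oidcClient{IsGroupRestricted: true, AllowedUserGroups: []userGroup{{ID: "admins"}}}
    fmt.Println(isUserGroupAllowedToAuthorize(user{UserGroups: []userGroup{{ID: "admins"}}}, c)) // true
    fmt.Println(isUserGroupAllowedToAuthorize(user{}, c))                                        // false
}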
||||
@@ -308,7 +314,7 @@ func (s *OidcService) createTokenFromDeviceCode(ctx context.Context, input dto.O
|
||||
}
|
||||
|
||||
// Explicitly use the input clientID for the audience claim to ensure consistency
|
||||
idToken, err := s.jwtService.GenerateIDToken(userClaims, input.ClientID, "")
|
||||
idToken, err := s.jwtService.GenerateIDToken(userClaims, input.ClientID, deviceAuth.Nonce)
|
||||
if err != nil {
|
||||
return CreatedTokens{}, err
|
||||
}
|
||||
@@ -393,7 +399,7 @@ func (s *OidcService) createTokenFromAuthorizationCode(ctx context.Context, inpu
|
||||
|
||||
// If the client is public or PKCE is enabled, the code verifier must match the code challenge
|
||||
if client.IsPublic || client.PkceEnabled {
|
||||
if !s.validateCodeVerifier(input.CodeVerifier, *authorizationCodeMetaData.CodeChallenge, *authorizationCodeMetaData.CodeChallengeMethodSha256) {
|
||||
if !validateCodeVerifier(input.CodeVerifier, *authorizationCodeMetaData.CodeChallenge, *authorizationCodeMetaData.CodeChallengeMethodSha256) {
|
||||
return CreatedTokens{}, &common.OidcInvalidCodeVerifierError{}
|
||||
}
|
||||
}
|
||||
@@ -674,24 +680,26 @@ func (s *OidcService) introspectRefreshToken(ctx context.Context, clientID strin
|
||||
}
|
||||
|
||||
func (s *OidcService) GetClient(ctx context.Context, clientID string) (model.OidcClient, error) {
|
||||
return s.getClientInternal(ctx, clientID, s.db)
|
||||
return s.getClientInternal(ctx, clientID, s.db, false)
|
||||
}
|
||||
|
||||
func (s *OidcService) getClientInternal(ctx context.Context, clientID string, tx *gorm.DB) (model.OidcClient, error) {
|
||||
func (s *OidcService) getClientInternal(ctx context.Context, clientID string, tx *gorm.DB, forUpdate bool) (model.OidcClient, error) {
|
||||
var client model.OidcClient
|
||||
err := tx.
|
||||
q := tx.
|
||||
WithContext(ctx).
|
||||
Preload("CreatedBy").
|
||||
Preload("AllowedUserGroups").
|
||||
First(&client, "id = ?", clientID).
|
||||
Error
|
||||
if err != nil {
|
||||
return model.OidcClient{}, err
|
||||
Preload("AllowedUserGroups")
|
||||
if forUpdate {
|
||||
q = q.Clauses(clause.Locking{Strength: "UPDATE"})
|
||||
}
|
||||
q = q.First(&client, "id = ?", clientID)
|
||||
if q.Error != nil {
|
||||
return model.OidcClient{}, q.Error
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (s *OidcService) ListClients(ctx context.Context, name string, sortedPaginationRequest utils.SortedPaginationRequest) ([]model.OidcClient, utils.PaginationResponse, error) {
|
||||
func (s *OidcService) ListClients(ctx context.Context, name string, listRequestOptions utils.ListRequestOptions) ([]model.OidcClient, utils.PaginationResponse, error) {
|
||||
var clients []model.OidcClient
|
||||
|
||||
query := s.db.
|
||||
@@ -704,26 +712,21 @@ func (s *OidcService) ListClients(ctx context.Context, name string, sortedPagina
|
||||
}
|
||||
|
||||
// As allowedUserGroupsCount is not a column, we need to manually sort it
|
||||
if sortedPaginationRequest.Sort.Column == "allowedUserGroupsCount" && utils.IsValidSortDirection(sortedPaginationRequest.Sort.Direction) {
|
||||
if listRequestOptions.Sort.Column == "allowedUserGroupsCount" && utils.IsValidSortDirection(listRequestOptions.Sort.Direction) {
|
||||
query = query.Select("oidc_clients.*, COUNT(oidc_clients_allowed_user_groups.oidc_client_id)").
|
||||
Joins("LEFT JOIN oidc_clients_allowed_user_groups ON oidc_clients.id = oidc_clients_allowed_user_groups.oidc_client_id").
|
||||
Group("oidc_clients.id").
|
||||
Order("COUNT(oidc_clients_allowed_user_groups.oidc_client_id) " + sortedPaginationRequest.Sort.Direction)
|
||||
Order("COUNT(oidc_clients_allowed_user_groups.oidc_client_id) " + listRequestOptions.Sort.Direction)
|
||||
|
||||
response, err := utils.Paginate(sortedPaginationRequest.Pagination.Page, sortedPaginationRequest.Pagination.Limit, query, &clients)
|
||||
response, err := utils.Paginate(listRequestOptions.Pagination.Page, listRequestOptions.Pagination.Limit, query, &clients)
|
||||
return clients, response, err
|
||||
}
|
||||
|
||||
response, err := utils.PaginateAndSort(sortedPaginationRequest, query, &clients)
|
||||
response, err := utils.PaginateFilterAndSort(listRequestOptions, query, &clients)
|
||||
return clients, response, err
|
||||
}
|
||||
|
||||
func (s *OidcService) CreateClient(ctx context.Context, input dto.OidcClientCreateDto, userID string) (model.OidcClient, error) {
|
||||
tx := s.db.Begin()
|
||||
defer func() {
|
||||
tx.Rollback()
|
||||
}()
|
||||
|
||||
client := model.OidcClient{
|
||||
Base: model.Base{
|
||||
ID: input.ID,
|
||||
@@ -732,7 +735,7 @@ func (s *OidcService) CreateClient(ctx context.Context, input dto.OidcClientCrea
|
||||
}
|
||||
updateOIDCClientModelFromDto(&client, &input.OidcClientUpdateDto)
|
||||
|
||||
err := tx.
|
||||
err := s.db.
|
||||
WithContext(ctx).
|
||||
Create(&client).
|
||||
Error
|
||||
@@ -743,16 +746,19 @@ func (s *OidcService) CreateClient(ctx context.Context, input dto.OidcClientCrea
|
||||
return model.OidcClient{}, err
|
||||
}
|
||||
|
||||
// All storage operations must be executed outside of a transaction
|
||||
if input.LogoURL != nil {
|
||||
err = s.downloadAndSaveLogoFromURL(ctx, tx, client.ID, *input.LogoURL)
|
||||
err = s.downloadAndSaveLogoFromURL(ctx, client.ID, *input.LogoURL, true)
|
||||
if err != nil {
|
||||
return model.OidcClient{}, fmt.Errorf("failed to download logo: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
err = tx.Commit().Error
|
||||
if err != nil {
|
||||
return model.OidcClient{}, err
|
||||
if input.DarkLogoURL != nil {
|
||||
err = s.downloadAndSaveLogoFromURL(ctx, client.ID, *input.DarkLogoURL, false)
|
||||
if err != nil {
|
||||
return model.OidcClient{}, fmt.Errorf("failed to download dark logo: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return client, nil
|
||||
@@ -760,31 +766,53 @@ func (s *OidcService) CreateClient(ctx context.Context, input dto.OidcClientCrea
|
||||
|
||||
func (s *OidcService) UpdateClient(ctx context.Context, clientID string, input dto.OidcClientUpdateDto) (model.OidcClient, error) {
|
||||
tx := s.db.Begin()
|
||||
defer func() { tx.Rollback() }()
|
||||
defer func() {
|
||||
tx.Rollback()
|
||||
}()
|
||||
|
||||
var client model.OidcClient
|
||||
if err := tx.WithContext(ctx).
|
||||
err := tx.WithContext(ctx).
|
||||
Preload("CreatedBy").
|
||||
First(&client, "id = ?", clientID).Error; err != nil {
|
||||
First(&client, "id = ?", clientID).Error
|
||||
if err != nil {
|
||||
return model.OidcClient{}, err
|
||||
}
|
||||
|
||||
updateOIDCClientModelFromDto(&client, &input)
|
||||
|
||||
if err := tx.WithContext(ctx).Save(&client).Error; err != nil {
|
||||
if !input.IsGroupRestricted {
|
||||
// Clear allowed user groups if the restriction is removed
|
||||
err = tx.Model(&client).Association("AllowedUserGroups").Clear()
|
||||
if err != nil {
|
||||
return model.OidcClient{}, err
|
||||
}
|
||||
}
|
||||
|
||||
err = tx.WithContext(ctx).Save(&client).Error
|
||||
if err != nil {
|
||||
return model.OidcClient{}, err
|
||||
}
|
||||
|
||||
err = tx.Commit().Error
|
||||
if err != nil {
|
||||
return model.OidcClient{}, err
|
||||
}
|
||||
|
||||
// All storage operations must be executed outside of a transaction
|
||||
if input.LogoURL != nil {
|
||||
err := s.downloadAndSaveLogoFromURL(ctx, tx, client.ID, *input.LogoURL)
|
||||
err = s.downloadAndSaveLogoFromURL(ctx, client.ID, *input.LogoURL, true)
|
||||
if err != nil {
|
||||
return model.OidcClient{}, fmt.Errorf("failed to download logo: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := tx.Commit().Error; err != nil {
|
||||
return model.OidcClient{}, err
|
||||
if input.DarkLogoURL != nil {
|
||||
err = s.downloadAndSaveLogoFromURL(ctx, client.ID, *input.DarkLogoURL, false)
|
||||
if err != nil {
|
||||
return model.OidcClient{}, fmt.Errorf("failed to download dark logo: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
@@ -798,6 +826,7 @@ func updateOIDCClientModelFromDto(client *model.OidcClient, input *dto.OidcClien
|
||||
client.PkceEnabled = input.IsPublic || input.PkceEnabled
|
||||
client.RequiresReauthentication = input.RequiresReauthentication
|
||||
client.LaunchURL = input.LaunchURL
|
||||
client.IsGroupRestricted = input.IsGroupRestricted
|
||||
|
||||
// Credentials
|
||||
client.Credentials.FederatedIdentities = make([]model.OidcClientFederatedIdentity, len(input.Credentials.FederatedIdentities))
|
||||
@@ -817,12 +846,24 @@ func (s *OidcService) DeleteClient(ctx context.Context, clientID string) error {
|
||||
err := s.db.
|
||||
WithContext(ctx).
|
||||
Where("id = ?", clientID).
|
||||
Clauses(clause.Returning{}).
|
||||
Delete(&client).
|
||||
Error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Delete images if present
|
||||
// Note that storage operations must be done outside of a transaction
|
||||
if client.ImageType != nil && *client.ImageType != "" {
|
||||
old := path.Join("oidc-client-images", client.ID+"."+*client.ImageType)
|
||||
_ = s.fileStorage.Delete(ctx, old)
|
||||
}
|
||||
if client.DarkImageType != nil && *client.DarkImageType != "" {
|
||||
old := path.Join("oidc-client-images", client.ID+"-dark."+*client.DarkImageType)
|
||||
_ = s.fileStorage.Delete(ctx, old)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
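DeleteClient relies on Clauses(clause.Returning{}) so the deleted row, including its image types, is still available for the storage cleanup that follows the delete. A tiny GORM DryRun sketch of that clause in isolation (the model here is illustrative, and the exact SQL depends on the dialect):

package main

import (
    "fmt"

    "gorm.io/driver/sqlite"
    "gorm.io/gorm"
    "gorm.io/gorm/clause"
)

type OidcClient struct {
    ID        string `gorm:"primaryKey"`
    ImageType *string
}

func main() {
    // DryRun only builds the SQL, which is enough to show the clause.
    db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{DryRun: true})
    if err != nil {
        panic(err)
    }

    var deleted OidcClient
    stmt := db.Clauses(clause.Returning{}).
        Where("id = ?", "client-123").
        Delete(&deleted).Statement

    // On databases that support it this renders roughly as:
    // DELETE FROM oidc_clients WHERE id = ? RETURNING *
    fmt.Println(stmt.SQL.String(), stmt.Vars)
}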
|
||||
@@ -868,50 +909,98 @@ func (s *OidcService) CreateClientSecret(ctx context.Context, clientID string) (
|
||||
return clientSecret, nil
|
||||
}
|
||||
|
||||
func (s *OidcService) GetClientLogo(ctx context.Context, clientID string) (string, string, error) {
|
||||
func (s *OidcService) GetClientLogo(ctx context.Context, clientID string, light bool) (io.ReadCloser, int64, string, error) {
|
||||
var client model.OidcClient
|
||||
err := s.db.
|
||||
WithContext(ctx).
|
||||
First(&client, "id = ?", clientID).
|
||||
Error
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
return nil, 0, "", err
|
||||
}
|
||||
|
||||
if client.ImageType == nil {
|
||||
return "", "", errors.New("image not found")
|
||||
var suffix string
|
||||
var ext string
|
||||
switch {
|
||||
case !light && client.DarkImageType != nil:
|
||||
// Dark logo if requested and exists
|
||||
suffix = "-dark"
|
||||
ext = *client.DarkImageType
|
||||
case client.ImageType != nil:
|
||||
// Light logo if requested or no dark logo is available
|
||||
ext = *client.ImageType
|
||||
default:
|
||||
return nil, 0, "", errors.New("image not found")
|
||||
}
|
||||
|
||||
imagePath := common.EnvConfig.UploadPath + "/oidc-client-images/" + client.ID + "." + *client.ImageType
|
||||
mimeType := utils.GetImageMimeType(*client.ImageType)
|
||||
mimeType := utils.GetImageMimeType(ext)
|
||||
if mimeType == "" {
|
||||
return nil, 0, "", fmt.Errorf("unsupported image type '%s'", ext)
|
||||
}
|
||||
key := path.Join("oidc-client-images", client.ID+suffix+"."+ext)
|
||||
reader, size, err := s.fileStorage.Open(ctx, key)
|
||||
if err != nil {
|
||||
return nil, 0, "", err
|
||||
}
|
||||
|
||||
return imagePath, mimeType, nil
|
||||
return reader, size, mimeType, nil
|
||||
}
|
||||
|
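GetClientLogo now returns a stream, its size and its MIME type instead of a filesystem path, leaving it to the HTTP layer to copy the bytes to the response. A hedged sketch of a handler consuming such a triple; the route, query parameter and logoOpener signature are assumptions, not the actual pocket-id controller:

package main

import (
    "fmt"
    "io"
    "net/http"
    "strings"
)

// logoOpener mirrors the shape of a storage-backed lookup: a stream, its size and MIME type.
type logoOpener func(clientID string, light bool) (io.ReadCloser, int64, string, error)

func logoHandler(open logoOpener) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        light := r.URL.Query().Get("variant") != "dark"
        reader, size, mimeType, err := open(r.PathValue("id"), light)
        if err != nil {
            http.Error(w, "image not found", http.StatusNotFound)
            return
        }
        defer reader.Close()

        w.Header().Set("Content-Type", mimeType)
        w.Header().Set("Content-Length", fmt.Sprintf("%d", size))
        _, _ = io.Copy(w, reader)
    }
}

func main() {
    open := func(string, bool) (io.ReadCloser, int64, string, error) {
        body := "fake-png-bytes"
        return io.NopCloser(strings.NewReader(body)), int64(len(body)), "image/png", nil
    }
    http.Handle("GET /clients/{id}/logo", logoHandler(open))
    // http.ListenAndServe(":8080", nil) // left commented for the sketch
}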
||||
func (s *OidcService) UpdateClientLogo(ctx context.Context, clientID string, file *multipart.FileHeader) error {
|
||||
func (s *OidcService) UpdateClientLogo(ctx context.Context, clientID string, file *multipart.FileHeader, light bool) error {
|
||||
fileType := strings.ToLower(utils.GetFileExtension(file.Filename))
|
||||
if mimeType := utils.GetImageMimeType(fileType); mimeType == "" {
|
||||
return &common.FileTypeNotSupportedError{}
|
||||
}
|
||||
|
||||
imagePath := common.EnvConfig.UploadPath + "/oidc-client-images/" + clientID + "." + fileType
|
||||
err := utils.SaveFile(file, imagePath)
|
||||
var darkSuffix string
|
||||
if !light {
|
||||
darkSuffix = "-dark"
|
||||
}
|
||||
|
||||
imagePath := path.Join("oidc-client-images", clientID+darkSuffix+"."+fileType)
|
||||
reader, err := file.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer reader.Close()
|
||||
err = s.fileStorage.Save(ctx, imagePath, reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tx := s.db.Begin()
|
||||
|
||||
err = s.updateClientLogoType(ctx, tx, clientID, fileType)
|
||||
err = s.updateClientLogoType(ctx, clientID, fileType, light)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
return tx.Commit().Error
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *OidcService) DeleteClientLogo(ctx context.Context, clientID string) error {
|
||||
return s.deleteClientLogoInternal(ctx, clientID, "", func(client *model.OidcClient) (string, error) {
|
||||
if client.ImageType == nil {
|
||||
return "", errors.New("image not found")
|
||||
}
|
||||
|
||||
oldImageType := *client.ImageType
|
||||
client.ImageType = nil
|
||||
return oldImageType, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (s *OidcService) DeleteClientDarkLogo(ctx context.Context, clientID string) error {
|
||||
return s.deleteClientLogoInternal(ctx, clientID, "-dark", func(client *model.OidcClient) (string, error) {
|
||||
if client.DarkImageType == nil {
|
||||
return "", errors.New("image not found")
|
||||
}
|
||||
|
||||
oldImageType := *client.DarkImageType
|
||||
client.DarkImageType = nil
|
||||
return oldImageType, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (s *OidcService) deleteClientLogoInternal(ctx context.Context, clientID string, imagePathSuffix string, setClientImage func(*model.OidcClient) (string, error)) error {
|
||||
tx := s.db.Begin()
|
||||
defer func() {
|
||||
tx.Rollback()
|
||||
@@ -926,13 +1015,11 @@ func (s *OidcService) DeleteClientLogo(ctx context.Context, clientID string) err
|
||||
return err
|
||||
}
|
||||
|
||||
if client.ImageType == nil {
|
||||
return errors.New("image not found")
|
||||
oldImageType, err := setClientImage(&client)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
oldImageType := *client.ImageType
|
||||
client.ImageType = nil
|
||||
|
||||
err = tx.
|
||||
WithContext(ctx).
|
||||
Save(&client).
|
||||
@@ -941,12 +1028,14 @@ func (s *OidcService) DeleteClientLogo(ctx context.Context, clientID string) err
|
||||
return err
|
||||
}
|
||||
|
||||
imagePath := common.EnvConfig.UploadPath + "/oidc-client-images/" + client.ID + "." + oldImageType
|
||||
if err := os.Remove(imagePath); err != nil {
|
||||
err = tx.Commit().Error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = tx.Commit().Error
|
||||
// All storage operations must be performed outside of a database transaction
|
||||
imagePath := path.Join("oidc-client-images", client.ID+imagePathSuffix+"."+oldImageType)
|
||||
err = s.fileStorage.Delete(ctx, imagePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -960,7 +1049,7 @@ func (s *OidcService) UpdateAllowedUserGroups(ctx context.Context, id string, in
|
||||
tx.Rollback()
|
||||
}()
|
||||
|
||||
client, err = s.getClientInternal(ctx, id, tx)
|
||||
client, err = s.getClientInternal(ctx, id, tx, true)
|
||||
if err != nil {
|
||||
return model.OidcClient{}, err
|
||||
}
|
||||
@@ -1002,6 +1091,7 @@ func (s *OidcService) UpdateAllowedUserGroups(ctx context.Context, id string, in
|
||||
return model.OidcClient{}, err
|
||||
}
|
||||
|
||||
s.scimService.ScheduleSync()
|
||||
return client, nil
|
||||
}
|
||||
|
||||
@@ -1082,13 +1172,20 @@ func (s *OidcService) createAuthorizationCode(ctx context.Context, clientID stri
	return randomString, nil
}

func (s *OidcService) validateCodeVerifier(codeVerifier, codeChallenge string, codeChallengeMethodSha256 bool) bool {
func validateCodeVerifier(codeVerifier, codeChallenge string, codeChallengeMethodSha256 bool) bool {
	if codeVerifier == "" || codeChallenge == "" {
		return false
	}

	if !codeChallengeMethodSha256 {
		return codeVerifier == codeChallenge
		return subtle.ConstantTimeCompare([]byte(codeVerifier), []byte(codeChallenge)) == 1
	}

	// Base64 URL decode the challenge
	// If it's not valid base64url, fail the operation
	codeChallengeBytes, err := base64.RawURLEncoding.DecodeString(codeChallenge)
	if err != nil {
		return false
	}

	// Compute SHA-256 hash of the codeVerifier
@@ -1096,10 +1193,7 @@ func (s *OidcService) validateCodeVerifier(codeVerifier, codeChallenge string, c
	h.Write([]byte(codeVerifier))
	codeVerifierHash := h.Sum(nil)

	// Base64 URL encode the verifier hash
	encodedVerifierHash := base64.RawURLEncoding.EncodeToString(codeVerifierHash)

	return encodedVerifierHash == codeChallenge
	return subtle.ConstantTimeCompare(codeVerifierHash, codeChallengeBytes) == 1
}
||||
|
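For context on the constant-time checks above: a PKCE client derives the S256 code_challenge from a random code_verifier exactly the way validateCodeVerifier recomputes it. A minimal standard-library sketch of generating such a pair (not pocket-id code):

package main

import (
    "crypto/rand"
    "crypto/sha256"
    "encoding/base64"
    "fmt"
)

// newPKCEPair returns a random code_verifier and its S256 code_challenge,
// matching what the server recomputes during token exchange.
func newPKCEPair() (verifier, challenge string, err error) {
    buf := make([]byte, 32)
    if _, err = rand.Read(buf); err != nil {
        return "", "", err
    }
    verifier = base64.RawURLEncoding.EncodeToString(buf)

    sum := sha256.Sum256([]byte(verifier))
    challenge = base64.RawURLEncoding.EncodeToString(sum[:])
    return verifier, challenge, nil
}

func main() {
    v, c, err := newPKCEPair()
    if err != nil {
        panic(err)
    }
    fmt.Println("code_verifier: ", v)
    fmt.Println("code_challenge:", c)
}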
||||
func (s *OidcService) getCallbackURL(client *model.OidcClient, inputCallbackURL string, tx *gorm.DB, ctx context.Context) (callbackURL string, err error) {
|
||||
@@ -1114,7 +1208,7 @@ func (s *OidcService) getCallbackURL(client *model.OidcClient, inputCallbackURL
|
||||
|
||||
// If URLs are already configured, validate against them
|
||||
if len(client.CallbackURLs) > 0 {
|
||||
matched, err := s.getCallbackURLFromList(client.CallbackURLs, inputCallbackURL)
|
||||
matched, err := utils.GetCallbackURLFromList(client.CallbackURLs, inputCallbackURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
} else if matched == "" {
|
||||
@@ -1137,7 +1231,7 @@ func (s *OidcService) getLogoutCallbackURL(client *model.OidcClient, inputLogout
|
||||
return client.LogoutCallbackURLs[0], nil
|
||||
}
|
||||
|
||||
matched, err := s.getCallbackURLFromList(client.LogoutCallbackURLs, inputLogoutCallbackURL)
|
||||
matched, err := utils.GetCallbackURLFromList(client.LogoutCallbackURLs, inputLogoutCallbackURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
} else if matched == "" {
|
||||
@@ -1147,21 +1241,6 @@ func (s *OidcService) getLogoutCallbackURL(client *model.OidcClient, inputLogout
|
||||
return matched, nil
|
||||
}
|
||||
|
||||
func (s *OidcService) getCallbackURLFromList(urls []string, inputCallbackURL string) (callbackURL string, err error) {
|
||||
for _, callbackPattern := range urls {
|
||||
regexPattern := "^" + strings.ReplaceAll(regexp.QuoteMeta(callbackPattern), `\*`, ".*") + "$"
|
||||
matched, err := regexp.MatchString(regexPattern, inputCallbackURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if matched {
|
||||
return inputCallbackURL, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (s *OidcService) addCallbackURLToClient(ctx context.Context, client *model.OidcClient, callbackURL string, tx *gorm.DB) error {
|
||||
// Add the new callback URL to the existing list
|
||||
client.CallbackURLs = append(client.CallbackURLs, callbackURL)
|
||||
@@ -1203,6 +1282,7 @@ func (s *OidcService) CreateDeviceAuthorization(ctx context.Context, input dto.O
|
||||
ExpiresAt: datatype.DateTime(time.Now().Add(DeviceCodeDuration)),
|
||||
IsAuthorized: false,
|
||||
ClientID: client.ID,
|
||||
Nonce: input.Nonce,
|
||||
}
|
||||
|
||||
if err := s.db.Create(deviceAuth).Error; err != nil {
|
||||
@@ -1250,7 +1330,7 @@ func (s *OidcService) VerifyDeviceCode(ctx context.Context, userCode string, use
|
||||
return fmt.Errorf("error finding user groups: %w", err)
|
||||
}
|
||||
|
||||
if !s.IsUserGroupAllowedToAuthorize(user, deviceAuth.Client) {
|
||||
if !IsUserGroupAllowedToAuthorize(user, deviceAuth.Client) {
|
||||
return &common.OidcAccessDeniedError{}
|
||||
}
|
||||
|
||||
@@ -1323,9 +1403,10 @@ func (s *OidcService) GetDeviceCodeInfo(ctx context.Context, userCode string, us
|
||||
|
||||
return &dto.DeviceCodeInfoDto{
|
||||
Client: dto.OidcClientMetaDataDto{
|
||||
ID: deviceAuth.Client.ID,
|
||||
Name: deviceAuth.Client.Name,
|
||||
HasLogo: deviceAuth.Client.HasLogo(),
|
||||
ID: deviceAuth.Client.ID,
|
||||
Name: deviceAuth.Client.Name,
|
||||
HasLogo: deviceAuth.Client.HasLogo(),
|
||||
HasDarkLogo: deviceAuth.Client.HasDarkLogo(),
|
||||
},
|
||||
Scope: deviceAuth.Scope,
|
||||
AuthorizationRequired: !hasAuthorizedClient,
|
||||
@@ -1349,7 +1430,7 @@ func (s *OidcService) GetAllowedGroupsCountOfClient(ctx context.Context, id stri
|
||||
return count, nil
|
||||
}
|
||||
|
||||
func (s *OidcService) ListAuthorizedClients(ctx context.Context, userID string, sortedPaginationRequest utils.SortedPaginationRequest) ([]model.UserAuthorizedOidcClient, utils.PaginationResponse, error) {
|
||||
func (s *OidcService) ListAuthorizedClients(ctx context.Context, userID string, listRequestOptions utils.ListRequestOptions) ([]model.UserAuthorizedOidcClient, utils.PaginationResponse, error) {
|
||||
|
||||
query := s.db.
|
||||
WithContext(ctx).
|
||||
@@ -1358,7 +1439,7 @@ func (s *OidcService) ListAuthorizedClients(ctx context.Context, userID string,
|
||||
Where("user_id = ?", userID)
|
||||
|
||||
var authorizedClients []model.UserAuthorizedOidcClient
|
||||
response, err := utils.PaginateAndSort(sortedPaginationRequest, query, &authorizedClients)
|
||||
response, err := utils.PaginateFilterAndSort(listRequestOptions, query, &authorizedClients)
|
||||
|
||||
return authorizedClients, response, err
|
||||
}
|
||||
@@ -1391,7 +1472,7 @@ func (s *OidcService) RevokeAuthorizedClient(ctx context.Context, userID string,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *OidcService) ListAccessibleOidcClients(ctx context.Context, userID string, sortedPaginationRequest utils.SortedPaginationRequest) ([]dto.AccessibleOidcClientDto, utils.PaginationResponse, error) {
|
||||
func (s *OidcService) ListAccessibleOidcClients(ctx context.Context, userID string, listRequestOptions utils.ListRequestOptions) ([]dto.AccessibleOidcClientDto, utils.PaginationResponse, error) {
|
||||
tx := s.db.Begin()
|
||||
defer func() {
|
||||
tx.Rollback()
|
||||
@@ -1438,13 +1519,13 @@ func (s *OidcService) ListAccessibleOidcClients(ctx context.Context, userID stri
|
||||
|
||||
// Handle custom sorting for lastUsedAt column
|
||||
var response utils.PaginationResponse
|
||||
if sortedPaginationRequest.Sort.Column == "lastUsedAt" && utils.IsValidSortDirection(sortedPaginationRequest.Sort.Direction) {
|
||||
if listRequestOptions.Sort.Column == "lastUsedAt" && utils.IsValidSortDirection(listRequestOptions.Sort.Direction) {
|
||||
query = query.
|
||||
Joins("LEFT JOIN user_authorized_oidc_clients ON oidc_clients.id = user_authorized_oidc_clients.client_id AND user_authorized_oidc_clients.user_id = ?", userID).
|
||||
Order("user_authorized_oidc_clients.last_used_at " + sortedPaginationRequest.Sort.Direction + " NULLS LAST")
|
||||
Order("user_authorized_oidc_clients.last_used_at " + listRequestOptions.Sort.Direction + " NULLS LAST")
|
||||
}
|
||||
|
||||
response, err = utils.PaginateAndSort(sortedPaginationRequest, query, &clients)
|
||||
response, err = utils.PaginateFilterAndSort(listRequestOptions, query, &clients)
|
||||
if err != nil {
|
||||
return nil, utils.PaginationResponse{}, err
|
||||
}
|
||||
@@ -1457,10 +1538,11 @@ func (s *OidcService) ListAccessibleOidcClients(ctx context.Context, userID stri
|
||||
}
|
||||
dtos[i] = dto.AccessibleOidcClientDto{
|
||||
OidcClientMetaDataDto: dto.OidcClientMetaDataDto{
|
||||
ID: client.ID,
|
||||
Name: client.Name,
|
||||
LaunchURL: client.LaunchURL,
|
||||
HasLogo: client.HasLogo(),
|
||||
ID: client.ID,
|
||||
Name: client.Name,
|
||||
LaunchURL: client.LaunchURL,
|
||||
HasLogo: client.HasLogo(),
|
||||
HasDarkLogo: client.HasDarkLogo(),
|
||||
},
|
||||
LastUsedAt: lastUsedAt,
|
||||
}
|
||||
@@ -1737,7 +1819,7 @@ func (s *OidcService) GetClientPreview(ctx context.Context, clientID string, use
|
||||
tx.Rollback()
|
||||
}()
|
||||
|
||||
client, err := s.getClientInternal(ctx, clientID, tx)
|
||||
client, err := s.getClientInternal(ctx, clientID, tx, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1752,7 +1834,7 @@ func (s *OidcService) GetClientPreview(ctx context.Context, clientID string, use
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !s.IsUserGroupAllowedToAuthorize(user, client) {
|
||||
if !IsUserGroupAllowedToAuthorize(user, client) {
|
||||
return nil, &common.OidcAccessDeniedError{}
|
||||
}
|
||||
|
||||
@@ -1879,10 +1961,28 @@ func (s *OidcService) IsClientAccessibleToUser(ctx context.Context, clientID str
|
||||
return false, err
|
||||
}
|
||||
|
||||
return s.IsUserGroupAllowedToAuthorize(user, client), nil
|
||||
return IsUserGroupAllowedToAuthorize(user, client), nil
|
||||
}
|
||||
|
||||
func (s *OidcService) downloadAndSaveLogoFromURL(parentCtx context.Context, tx *gorm.DB, clientID string, raw string) error {
|
||||
var errLogoTooLarge = errors.New("logo is too large")
|
||||
|
||||
func httpClientWithCheckRedirect(source *http.Client, checkRedirect func(req *http.Request, via []*http.Request) error) *http.Client {
|
||||
if source == nil {
|
||||
source = http.DefaultClient
|
||||
}
|
||||
|
||||
// Create a new client that clones the transport
|
||||
client := &http.Client{
|
||||
Transport: source.Transport,
|
||||
}
|
||||
|
||||
// Assign the CheckRedirect function
|
||||
client.CheckRedirect = checkRedirect
|
||||
|
||||
return client
|
||||
}
|
||||
|
||||
func (s *OidcService) downloadAndSaveLogoFromURL(parentCtx context.Context, clientID string, raw string, light bool) error {
|
||||
u, err := url.Parse(raw)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1891,18 +1991,29 @@ func (s *OidcService) downloadAndSaveLogoFromURL(parentCtx context.Context, tx *
|
||||
ctx, cancel := context.WithTimeout(parentCtx, 15*time.Second)
|
||||
defer cancel()
|
||||
|
||||
r := net.Resolver{}
|
||||
ips, err := r.LookupIPAddr(ctx, u.Hostname())
|
||||
if err != nil || len(ips) == 0 {
|
||||
return fmt.Errorf("cannot resolve hostname")
|
||||
// Prevents SSRF by allowing only public IPs
|
||||
ok, err := utils.IsURLPrivate(ctx, u)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if ok {
|
||||
return errors.New("private IP addresses are not allowed")
|
||||
}
|
||||
|
||||
// Prevents SSRF by allowing only public IPs
|
||||
for _, addr := range ips {
|
||||
if utils.IsPrivateIP(addr.IP) {
|
||||
return fmt.Errorf("private IP addresses are not allowed")
|
||||
// We need to check this on redirects too
|
||||
client := httpClientWithCheckRedirect(s.httpClient, func(r *http.Request, via []*http.Request) error {
|
||||
if len(via) >= 10 {
|
||||
return errors.New("stopped after 10 redirects")
|
||||
}
|
||||
}
|
||||
|
||||
ok, err := utils.IsURLPrivate(r.Context(), r.URL)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if ok {
|
||||
return errors.New("private IP addresses are not allowed")
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, raw, nil)
|
||||
if err != nil {
|
||||
@@ -1911,7 +2022,7 @@ func (s *OidcService) downloadAndSaveLogoFromURL(parentCtx context.Context, tx *
|
||||
req.Header.Set("User-Agent", "pocket-id/oidc-logo-fetcher")
|
||||
req.Header.Set("Accept", "image/*")
|
||||
|
||||
resp, err := s.httpClient.Do(req)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1923,7 +2034,7 @@ func (s *OidcService) downloadAndSaveLogoFromURL(parentCtx context.Context, tx *
|
||||
|
||||
const maxLogoSize int64 = 2 * 1024 * 1024 // 2MB
|
||||
if resp.ContentLength > maxLogoSize {
|
||||
return fmt.Errorf("logo is too large")
|
||||
return errLogoTooLarge
|
||||
}
|
||||
|
||||
// Prefer extension in path if supported
|
||||
@@ -1937,31 +2048,89 @@ func (s *OidcService) downloadAndSaveLogoFromURL(parentCtx context.Context, tx *
|
||||
return &common.FileTypeNotSupportedError{}
|
||||
}
|
||||
|
||||
imagePath := common.EnvConfig.UploadPath + "/oidc-client-images/" + clientID + "." + ext
|
||||
err = utils.SaveFileStream(io.LimitReader(resp.Body, maxLogoSize+1), imagePath)
|
||||
if err != nil {
|
||||
var darkSuffix string
|
||||
if !light {
|
||||
darkSuffix = "-dark"
|
||||
}
|
||||
|
||||
imagePath := path.Join("oidc-client-images", clientID+darkSuffix+"."+ext)
|
||||
err = s.fileStorage.Save(ctx, imagePath, utils.NewLimitReader(resp.Body, maxLogoSize+1))
|
||||
if errors.Is(err, utils.ErrSizeExceeded) {
|
||||
return errLogoTooLarge
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.updateClientLogoType(ctx, tx, clientID, ext); err != nil {
|
||||
err = s.updateClientLogoType(ctx, clientID, ext, light)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
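downloadAndSaveLogoFromURL now rejects private addresses for the initial URL and, via the CheckRedirect hook, for every redirect hop. The diff does not show utils.IsURLPrivate itself, so this is only a hedged sketch of the kind of resolve-then-filter check it likely performs:

package main

import (
    "context"
    "fmt"
    "net"
    "net/url"
)

// isURLPrivate resolves a URL's host and reports whether any resulting address
// is loopback, private, link-local, or unspecified (a basic SSRF guard).
func isURLPrivate(ctx context.Context, u *url.URL) (bool, error) {
    ips, err := net.DefaultResolver.LookupIPAddr(ctx, u.Hostname())
    if err != nil {
        return false, fmt.Errorf("cannot resolve hostname: %w", err)
    }
    for _, addr := range ips {
        ip := addr.IP
        if ip.IsLoopback() || ip.IsPrivate() || ip.IsLinkLocalUnicast() || ip.IsUnspecified() {
            return true, nil
        }
    }
    return false, nil
}

func main() {
    u, _ := url.Parse("http://127.0.0.1:8080/logo.png")
    private, err := isURLPrivate(context.Background(), u)
    fmt.Println(private, err) // true <nil>
}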
||||
func (s *OidcService) updateClientLogoType(ctx context.Context, tx *gorm.DB, clientID, ext string) error {
|
||||
uploadsDir := common.EnvConfig.UploadPath + "/oidc-client-images"
|
||||
func (s *OidcService) updateClientLogoType(ctx context.Context, clientID string, ext string, light bool) error {
|
||||
var darkSuffix string
|
||||
if !light {
|
||||
darkSuffix = "-dark"
|
||||
}
|
||||
|
||||
tx := s.db.Begin()
|
||||
defer func() {
|
||||
tx.Rollback()
|
||||
}()
|
||||
|
||||
// We need to acquire an update lock for the row to be locked, since we'll update it later
|
||||
var client model.OidcClient
|
||||
if err := tx.WithContext(ctx).First(&client, "id = ?", clientID).Error; err != nil {
|
||||
return err
|
||||
err := tx.
|
||||
WithContext(ctx).
|
||||
Clauses(clause.Locking{Strength: "UPDATE"}).
|
||||
First(&client, "id = ?", clientID).
|
||||
Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to look up client: %w", err)
|
||||
}
|
||||
if client.ImageType != nil && *client.ImageType != ext {
|
||||
old := fmt.Sprintf("%s/%s.%s", uploadsDir, client.ID, *client.ImageType)
|
||||
_ = os.Remove(old)
|
||||
}
|
||||
client.ImageType = &ext
|
||||
return tx.WithContext(ctx).Save(&client).Error
|
||||
|
||||
var currentType *string
|
||||
if light {
|
||||
currentType = client.ImageType
|
||||
client.ImageType = &ext
|
||||
} else {
|
||||
currentType = client.DarkImageType
|
||||
client.DarkImageType = &ext
|
||||
}
|
||||
|
||||
err = tx.
|
||||
WithContext(ctx).
|
||||
Save(&client).
|
||||
Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to save updated client: %w", err)
|
||||
}
|
||||
|
||||
err = tx.Commit().Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to commit transaction: %w", err)
|
||||
}
|
||||
|
||||
// Storage operations must be executed outside of a transaction
|
||||
if currentType != nil && *currentType != ext {
|
||||
old := path.Join("oidc-client-images", client.ID+darkSuffix+"."+*currentType)
|
||||
_ = s.fileStorage.Delete(ctx, old)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
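updateClientLogoType locks the client row with a SELECT ... FOR UPDATE, commits, and only afterwards deletes the old file from storage. A small GORM DryRun sketch of the locking clause on its own (the model is illustrative and the rendered SQL depends on the dialect):

package main

import (
    "fmt"

    "gorm.io/driver/sqlite"
    "gorm.io/gorm"
    "gorm.io/gorm/clause"
)

type Client struct {
    ID        string `gorm:"primaryKey"`
    ImageType *string
}

func main() {
    // DryRun builds the SQL without executing it, which is enough to show the clause.
    db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{DryRun: true})
    if err != nil {
        panic(err)
    }

    var c Client
    stmt := db.Clauses(clause.Locking{Strength: "UPDATE"}).
        Where("id = ?", "c1").
        Find(&c).Statement

    // On engines that support row locks this typically renders as:
    // SELECT * FROM clients WHERE id = ? FOR UPDATE
    fmt.Println(stmt.SQL.String(), stmt.Vars)
}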
||||
func (s *OidcService) GetClientScimServiceProvider(ctx context.Context, clientID string) (model.ScimServiceProvider, error) {
|
||||
var provider model.ScimServiceProvider
|
||||
err := s.db.
|
||||
WithContext(ctx).
|
||||
First(&provider, "oidc_client_id = ?", clientID).
|
||||
Error
|
||||
if err != nil {
|
||||
return model.ScimServiceProvider{}, err
|
||||
}
|
||||
|
||||
return provider, nil
|
||||
}
|
||||
|
||||
@@ -5,8 +5,13 @@ import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -19,6 +24,7 @@ import (
|
||||
"github.com/pocket-id/pocket-id/backend/internal/common"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/dto"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/model"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/storage"
|
||||
testutils "github.com/pocket-id/pocket-id/backend/internal/utils/testing"
|
||||
)
|
||||
|
||||
@@ -142,6 +148,7 @@ func TestOidcService_verifyClientCredentialsInternal(t *testing.T) {
|
||||
var err error
|
||||
// Create a test database
|
||||
db := testutils.NewDatabaseForTest(t)
|
||||
common.EnvConfig.EncryptionKey = []byte("0123456789abcdef0123456789abcdef")
|
||||
|
||||
// Create two JWKs for testing
|
||||
privateJWK, jwkSetJSON := generateTestECDSAKey(t)
|
||||
@@ -510,3 +517,460 @@ func TestOidcService_verifyClientCredentialsInternal(t *testing.T) {
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestValidateCodeVerifier_Plain(t *testing.T) {
|
||||
require.False(t, validateCodeVerifier("", "", false))
|
||||
require.False(t, validateCodeVerifier("", "", true))
|
||||
|
||||
t.Run("plain", func(t *testing.T) {
|
||||
require.False(t, validateCodeVerifier("", "challenge", false))
|
||||
require.False(t, validateCodeVerifier("verifier", "", false))
|
||||
require.True(t, validateCodeVerifier("plainVerifier", "plainVerifier", false))
|
||||
require.False(t, validateCodeVerifier("plainVerifier", "otherVerifier", false))
|
||||
})
|
||||
|
||||
t.Run("SHA 256", func(t *testing.T) {
|
||||
codeVerifier := "dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk"
|
||||
hash := sha256.Sum256([]byte(codeVerifier))
|
||||
codeChallenge := base64.RawURLEncoding.EncodeToString(hash[:])
|
||||
|
||||
require.True(t, validateCodeVerifier(codeVerifier, codeChallenge, true))
|
||||
require.False(t, validateCodeVerifier("wrongVerifier", codeChallenge, true))
|
||||
require.False(t, validateCodeVerifier(codeVerifier, "!", true))
|
||||
|
||||
// Invalid base64
|
||||
require.False(t, validateCodeVerifier("NOT!VALID", codeChallenge, true))
|
||||
})
|
||||
}
|
||||
|
||||
func TestOidcService_updateClientLogoType(t *testing.T) {
|
||||
// Create a test database
|
||||
db := testutils.NewDatabaseForTest(t)
|
||||
|
||||
// Create database storage
|
||||
dbStorage, err := storage.NewDatabaseStorage(db)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Init the OidcService
|
||||
s := &OidcService{
|
||||
db: db,
|
||||
fileStorage: dbStorage,
|
||||
}
|
||||
|
||||
// Create a test client
|
||||
client := model.OidcClient{
|
||||
Name: "Test Client",
|
||||
CallbackURLs: model.UrlList{"https://example.com/callback"},
|
||||
}
|
||||
err = db.Create(&client).Error
|
||||
require.NoError(t, err)
|
||||
|
||||
// Helper function to check if a file exists in storage
|
||||
fileExists := func(t *testing.T, path string) bool {
|
||||
t.Helper()
|
||||
_, _, err := dbStorage.Open(t.Context(), path)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Helper function to create a dummy file in storage
|
||||
createDummyFile := func(t *testing.T, path string) {
|
||||
t.Helper()
|
||||
err := dbStorage.Save(t.Context(), path, strings.NewReader("dummy content"))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
t.Run("Updates light logo type for client without previous logo", func(t *testing.T) {
|
||||
// Update the logo type
|
||||
err := s.updateClientLogoType(t.Context(), client.ID, "png", true)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the client was updated
|
||||
var updatedClient model.OidcClient
|
||||
err = db.First(&updatedClient, "id = ?", client.ID).Error
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, updatedClient.ImageType)
|
||||
assert.Equal(t, "png", *updatedClient.ImageType)
|
||||
})
|
||||
|
||||
t.Run("Updates dark logo type for client without previous dark logo", func(t *testing.T) {
|
||||
// Update the dark logo type
|
||||
err := s.updateClientLogoType(t.Context(), client.ID, "jpg", false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the client was updated
|
||||
var updatedClient model.OidcClient
|
||||
err = db.First(&updatedClient, "id = ?", client.ID).Error
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, updatedClient.DarkImageType)
|
||||
assert.Equal(t, "jpg", *updatedClient.DarkImageType)
|
||||
})
|
||||
|
||||
t.Run("Updates light logo type and deletes old file when type changes", func(t *testing.T) {
|
||||
// Create the old PNG file in storage
|
||||
oldPath := "oidc-client-images/" + client.ID + ".png"
|
||||
createDummyFile(t, oldPath)
|
||||
require.True(t, fileExists(t, oldPath), "Old file should exist before update")
|
||||
|
||||
// Client currently has a PNG logo, update to WEBP
|
||||
err := s.updateClientLogoType(t.Context(), client.ID, "webp", true)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the client was updated
|
||||
var updatedClient model.OidcClient
|
||||
err = db.First(&updatedClient, "id = ?", client.ID).Error
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, updatedClient.ImageType)
|
||||
assert.Equal(t, "webp", *updatedClient.ImageType)
|
||||
|
||||
// Old PNG file should be deleted
|
||||
assert.False(t, fileExists(t, oldPath), "Old PNG file should have been deleted")
|
||||
})
|
||||
|
||||
t.Run("Updates dark logo type and deletes old file when type changes", func(t *testing.T) {
|
||||
// Create the old JPG dark file in storage
|
||||
oldPath := "oidc-client-images/" + client.ID + "-dark.jpg"
|
||||
createDummyFile(t, oldPath)
|
||||
require.True(t, fileExists(t, oldPath), "Old dark file should exist before update")
|
||||
|
||||
// Client currently has a JPG dark logo, update to WEBP
|
||||
err := s.updateClientLogoType(t.Context(), client.ID, "webp", false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the client was updated
|
||||
var updatedClient model.OidcClient
|
||||
err = db.First(&updatedClient, "id = ?", client.ID).Error
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, updatedClient.DarkImageType)
|
||||
assert.Equal(t, "webp", *updatedClient.DarkImageType)
|
||||
|
||||
// Old JPG dark file should be deleted
|
||||
assert.False(t, fileExists(t, oldPath), "Old JPG dark file should have been deleted")
|
||||
})
|
||||
|
||||
t.Run("Does not delete file when type remains the same", func(t *testing.T) {
|
||||
// Create the WEBP file in storage
|
||||
webpPath := "oidc-client-images/" + client.ID + ".webp"
|
||||
createDummyFile(t, webpPath)
|
||||
require.True(t, fileExists(t, webpPath), "WEBP file should exist before update")
|
||||
|
||||
// Update to the same type (WEBP)
|
||||
err := s.updateClientLogoType(t.Context(), client.ID, "webp", true)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the client still has WEBP
|
||||
var updatedClient model.OidcClient
|
||||
err = db.First(&updatedClient, "id = ?", client.ID).Error
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, updatedClient.ImageType)
|
||||
assert.Equal(t, "webp", *updatedClient.ImageType)
|
||||
|
||||
// WEBP file should still exist since type didn't change
|
||||
assert.True(t, fileExists(t, webpPath), "WEBP file should still exist")
|
||||
})
|
||||
|
||||
t.Run("Returns error for non-existent client", func(t *testing.T) {
|
||||
err := s.updateClientLogoType(t.Context(), "non-existent-client-id", "png", true)
|
||||
require.Error(t, err)
|
||||
require.ErrorContains(t, err, "failed to look up client")
|
||||
})
|
||||
}
|
||||
|
||||
func TestOidcService_downloadAndSaveLogoFromURL(t *testing.T) {
|
||||
// Create a test database
|
||||
db := testutils.NewDatabaseForTest(t)
|
||||
|
||||
// Create database storage
|
||||
dbStorage, err := storage.NewDatabaseStorage(db)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a test client
|
||||
client := model.OidcClient{
|
||||
Name: "Test Client",
|
||||
CallbackURLs: model.UrlList{"https://example.com/callback"},
|
||||
}
|
||||
err = db.Create(&client).Error
|
||||
require.NoError(t, err)
|
||||
|
||||
// Helper function to check if a file exists in storage
|
||||
fileExists := func(t *testing.T, path string) bool {
|
||||
t.Helper()
|
||||
_, _, err := dbStorage.Open(t.Context(), path)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Helper function to get file content from storage
|
||||
getFileContent := func(t *testing.T, path string) []byte {
|
||||
t.Helper()
|
||||
reader, _, err := dbStorage.Open(t.Context(), path)
|
||||
require.NoError(t, err)
|
||||
defer reader.Close()
|
||||
content, err := io.ReadAll(reader)
|
||||
require.NoError(t, err)
|
||||
return content
|
||||
}
|
||||
|
||||
t.Run("Successfully downloads and saves PNG logo from URL", func(t *testing.T) {
|
||||
// Create mock PNG content
|
||||
pngContent := []byte("fake-png-content")
|
||||
|
||||
// Create a mock HTTP response with headers
|
||||
//nolint:bodyclose
|
||||
pngResponse := testutils.NewMockResponse(http.StatusOK, string(pngContent))
|
||||
pngResponse.Header.Set("Content-Type", "image/png")
|
||||
|
||||
// Create a mock HTTP client with responses
|
||||
mockResponses := map[string]*http.Response{
|
||||
//nolint:bodyclose
|
||||
"https://example.com/logo.png": pngResponse,
|
||||
}
|
||||
httpClient := &http.Client{
|
||||
Transport: &testutils.MockRoundTripper{
|
||||
Responses: mockResponses,
|
||||
},
|
||||
}
|
||||
|
||||
// Init the OidcService with mock HTTP client
|
||||
s := &OidcService{
|
||||
db: db,
|
||||
fileStorage: dbStorage,
|
||||
httpClient: httpClient,
|
||||
}
|
||||
|
||||
// Download and save the logo
|
||||
err := s.downloadAndSaveLogoFromURL(t.Context(), client.ID, "https://example.com/logo.png", true)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the file was saved
|
||||
logoPath := "oidc-client-images/" + client.ID + ".png"
|
||||
require.True(t, fileExists(t, logoPath), "Logo file should exist in storage")
|
||||
|
||||
// Verify the content
|
||||
savedContent := getFileContent(t, logoPath)
|
||||
assert.Equal(t, pngContent, savedContent)
|
||||
|
||||
// Verify the client was updated
|
||||
var updatedClient model.OidcClient
|
||||
err = db.First(&updatedClient, "id = ?", client.ID).Error
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, updatedClient.ImageType)
|
||||
assert.Equal(t, "png", *updatedClient.ImageType)
|
||||
})
|
||||
|
||||
t.Run("Successfully downloads and saves dark logo", func(t *testing.T) {
|
||||
// Create mock WEBP content
|
||||
webpContent := []byte("fake-webp-content")
|
||||
|
||||
//nolint:bodyclose
|
||||
webpResponse := testutils.NewMockResponse(http.StatusOK, string(webpContent))
|
||||
webpResponse.Header.Set("Content-Type", "image/webp")
|
||||
|
||||
mockResponses := map[string]*http.Response{
|
||||
//nolint:bodyclose
|
||||
"https://example.com/dark-logo.webp": webpResponse,
|
||||
}
|
||||
httpClient := &http.Client{
|
||||
Transport: &testutils.MockRoundTripper{
|
||||
Responses: mockResponses,
|
||||
},
|
||||
}
|
||||
|
||||
s := &OidcService{
|
||||
db: db,
|
||||
fileStorage: dbStorage,
|
||||
httpClient: httpClient,
|
||||
}
|
||||
|
||||
// Download and save the dark logo
|
||||
err := s.downloadAndSaveLogoFromURL(t.Context(), client.ID, "https://example.com/dark-logo.webp", false)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify the dark logo file was saved
|
||||
darkLogoPath := "oidc-client-images/" + client.ID + "-dark.webp"
|
||||
require.True(t, fileExists(t, darkLogoPath), "Dark logo file should exist in storage")
|
||||
|
||||
// Verify the content
|
||||
savedContent := getFileContent(t, darkLogoPath)
|
||||
assert.Equal(t, webpContent, savedContent)
|
||||
|
||||
// Verify the client was updated
|
||||
var updatedClient model.OidcClient
|
||||
err = db.First(&updatedClient, "id = ?", client.ID).Error
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, updatedClient.DarkImageType)
|
||||
assert.Equal(t, "webp", *updatedClient.DarkImageType)
|
||||
})
|
||||
|
||||
t.Run("Detects extension from URL path", func(t *testing.T) {
|
||||
svgContent := []byte("<svg></svg>")
|
||||
|
||||
mockResponses := map[string]*http.Response{
|
||||
//nolint:bodyclose
|
||||
"https://example.com/icon.svg": testutils.NewMockResponse(http.StatusOK, string(svgContent)),
|
||||
}
|
||||
httpClient := &http.Client{
|
||||
Transport: &testutils.MockRoundTripper{
|
||||
Responses: mockResponses,
|
||||
},
|
||||
}
|
||||
|
||||
s := &OidcService{
|
||||
db: db,
|
||||
fileStorage: dbStorage,
|
||||
httpClient: httpClient,
|
||||
}
|
||||
|
||||
err := s.downloadAndSaveLogoFromURL(t.Context(), client.ID, "https://example.com/icon.svg", true)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify SVG file was saved
|
||||
logoPath := "oidc-client-images/" + client.ID + ".svg"
|
||||
require.True(t, fileExists(t, logoPath), "SVG logo should exist")
|
||||
})
|
||||
|
||||
t.Run("Detects extension from Content-Type when path has no extension", func(t *testing.T) {
|
||||
jpgContent := []byte("fake-jpg-content")
|
||||
|
||||
//nolint:bodyclose
|
||||
jpgResponse := testutils.NewMockResponse(http.StatusOK, string(jpgContent))
|
||||
jpgResponse.Header.Set("Content-Type", "image/jpeg")
|
||||
|
||||
mockResponses := map[string]*http.Response{
|
||||
//nolint:bodyclose
|
||||
"https://example.com/logo": jpgResponse,
|
||||
}
|
||||
httpClient := &http.Client{
|
||||
Transport: &testutils.MockRoundTripper{
|
||||
Responses: mockResponses,
|
||||
},
|
||||
}
|
||||
|
||||
s := &OidcService{
|
||||
db: db,
|
||||
fileStorage: dbStorage,
|
||||
httpClient: httpClient,
|
||||
}
|
||||
|
||||
		err := s.downloadAndSaveLogoFromURL(t.Context(), client.ID, "https://example.com/logo", true)
		require.NoError(t, err)

		// Verify JPG file was saved (jpeg extension is normalized to jpg)
		logoPath := "oidc-client-images/" + client.ID + ".jpg"
		require.True(t, fileExists(t, logoPath), "JPG logo should exist")
	})

	t.Run("Returns error for invalid URL", func(t *testing.T) {
		s := &OidcService{
			db:          db,
			fileStorage: dbStorage,
			httpClient:  &http.Client{},
		}

		err := s.downloadAndSaveLogoFromURL(t.Context(), client.ID, "://invalid-url", true)
		require.Error(t, err)
	})

	t.Run("Returns error for non-200 status code", func(t *testing.T) {
		mockResponses := map[string]*http.Response{
			//nolint:bodyclose
			"https://example.com/not-found.png": testutils.NewMockResponse(http.StatusNotFound, "Not Found"),
		}
		httpClient := &http.Client{
			Transport: &testutils.MockRoundTripper{
				Responses: mockResponses,
			},
		}

		s := &OidcService{
			db:          db,
			fileStorage: dbStorage,
			httpClient:  httpClient,
		}

		err := s.downloadAndSaveLogoFromURL(t.Context(), client.ID, "https://example.com/not-found.png", true)
		require.Error(t, err)
		require.ErrorContains(t, err, "failed to fetch logo")
	})

	t.Run("Returns error for too large content", func(t *testing.T) {
		// Create content larger than 2MB (maxLogoSize)
		largeContent := strings.Repeat("x", 2<<20+100) // 2.1MB

		//nolint:bodyclose
		largeResponse := testutils.NewMockResponse(http.StatusOK, largeContent)
		largeResponse.Header.Set("Content-Type", "image/png")
		largeResponse.Header.Set("Content-Length", strconv.Itoa(len(largeContent)))

		mockResponses := map[string]*http.Response{
			//nolint:bodyclose
			"https://example.com/large.png": largeResponse,
		}
		httpClient := &http.Client{
			Transport: &testutils.MockRoundTripper{
				Responses: mockResponses,
			},
		}

		s := &OidcService{
			db:          db,
			fileStorage: dbStorage,
			httpClient:  httpClient,
		}

		err := s.downloadAndSaveLogoFromURL(t.Context(), client.ID, "https://example.com/large.png", true)
		require.Error(t, err)
		require.ErrorIs(t, err, errLogoTooLarge)
	})

	t.Run("Returns error for unsupported file type", func(t *testing.T) {
		//nolint:bodyclose
		textResponse := testutils.NewMockResponse(http.StatusOK, "text content")
		textResponse.Header.Set("Content-Type", "text/plain")

		mockResponses := map[string]*http.Response{
			//nolint:bodyclose
			"https://example.com/file.txt": textResponse,
		}
		httpClient := &http.Client{
			Transport: &testutils.MockRoundTripper{
				Responses: mockResponses,
			},
		}

		s := &OidcService{
			db:          db,
			fileStorage: dbStorage,
			httpClient:  httpClient,
		}

		err := s.downloadAndSaveLogoFromURL(t.Context(), client.ID, "https://example.com/file.txt", true)
		require.Error(t, err)
		var fileTypeErr *common.FileTypeNotSupportedError
		require.ErrorAs(t, err, &fileTypeErr)
	})

	t.Run("Returns error for non-existent client", func(t *testing.T) {
		//nolint:bodyclose
		pngResponse := testutils.NewMockResponse(http.StatusOK, "content")
		pngResponse.Header.Set("Content-Type", "image/png")

		mockResponses := map[string]*http.Response{
			//nolint:bodyclose
			"https://example.com/logo.png": pngResponse,
		}
		httpClient := &http.Client{
			Transport: &testutils.MockRoundTripper{
				Responses: mockResponses,
			},
		}

		s := &OidcService{
			db:          db,
			fileStorage: dbStorage,
			httpClient:  httpClient,
		}

		err := s.downloadAndSaveLogoFromURL(t.Context(), "non-existent-client-id", "https://example.com/logo.png", true)
		require.Error(t, err)
		require.ErrorContains(t, err, "failed to look up client")
	})
}
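The tests above rely on `testutils.NewMockResponse` and `testutils.MockRoundTripper`, whose implementations are not part of this diff. A minimal sketch of what such helpers could look like, assuming only the usage shown above (the field and function names mirror the tests; everything else is an assumption):

```go
// Illustrative only: a RoundTripper that serves canned responses keyed by URL.
// The real testutils package may differ.
package testutils

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

type MockRoundTripper struct {
	Responses map[string]*http.Response
}

func (m *MockRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	resp, ok := m.Responses[req.URL.String()]
	if !ok {
		return nil, fmt.Errorf("no mock response registered for %s", req.URL)
	}
	return resp, nil
}

func NewMockResponse(statusCode int, body string) *http.Response {
	return &http.Response{
		StatusCode: statusCode,
		Header:     make(http.Header),
		Body:       io.NopCloser(strings.NewReader(body)),
	}
}
```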
816  backend/internal/service/scim_service.go  Normal file
@@ -0,0 +1,816 @@
package service

import (
	"bytes"
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/go-co-op/gocron/v2"
	"github.com/pocket-id/pocket-id/backend/internal/dto"
	"github.com/pocket-id/pocket-id/backend/internal/model"
	datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
	"github.com/pocket-id/pocket-id/backend/internal/utils"
	"gorm.io/gorm"
)

const (
	scimUserSchema  = "urn:ietf:params:scim:schemas:core:2.0:User"
	scimGroupSchema = "urn:ietf:params:scim:schemas:core:2.0:Group"
	scimContentType = "application/scim+json"
)

const scimErrorBodyLimit = 4096

type scimSyncAction int

type Scheduler interface {
	RegisterJob(ctx context.Context, name string, def gocron.JobDefinition, job func(ctx context.Context) error, runImmediately bool, extraOptions ...gocron.JobOption) error
	RemoveJob(name string) error
}

const (
	scimActionNone scimSyncAction = iota
	scimActionCreated
	scimActionUpdated
	scimActionDeleted
)

type scimSyncStats struct {
	Created int
	Updated int
	Deleted int
}

// ScimService handles SCIM provisioning to external service providers.
type ScimService struct {
	db         *gorm.DB
	scheduler  Scheduler
	httpClient *http.Client
}

func NewScimService(db *gorm.DB, scheduler Scheduler, httpClient *http.Client) *ScimService {
	if httpClient == nil {
		httpClient = &http.Client{Timeout: 20 * time.Second}
	}

	return &ScimService{db: db, scheduler: scheduler, httpClient: httpClient}
}
|
||||
|
||||
func (s *ScimService) GetServiceProvider(
|
||||
ctx context.Context,
|
||||
serviceProviderID string,
|
||||
) (model.ScimServiceProvider, error) {
|
||||
var provider model.ScimServiceProvider
|
||||
err := s.db.WithContext(ctx).
|
||||
Preload("OidcClient").
|
||||
Preload("OidcClient.AllowedUserGroups").
|
||||
First(&provider, "id = ?", serviceProviderID).
|
||||
Error
|
||||
if err != nil {
|
||||
return model.ScimServiceProvider{}, err
|
||||
}
|
||||
return provider, nil
|
||||
}
|
||||
|
||||
func (s *ScimService) ListServiceProviders(ctx context.Context) ([]model.ScimServiceProvider, error) {
|
||||
var providers []model.ScimServiceProvider
|
||||
err := s.db.WithContext(ctx).
|
||||
Preload("OidcClient").
|
||||
Find(&providers).
|
||||
Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return providers, nil
|
||||
}
|
||||
|
||||
func (s *ScimService) CreateServiceProvider(
|
||||
ctx context.Context,
|
||||
input *dto.ScimServiceProviderCreateDTO) (model.ScimServiceProvider, error) {
|
||||
provider := model.ScimServiceProvider{
|
||||
Endpoint: input.Endpoint,
|
||||
Token: datatype.EncryptedString(input.Token),
|
||||
OidcClientID: input.OidcClientID,
|
||||
}
|
||||
|
||||
if err := s.db.WithContext(ctx).Create(&provider).Error; err != nil {
|
||||
return model.ScimServiceProvider{}, err
|
||||
}
|
||||
|
||||
return provider, nil
|
||||
}
|
||||
|
||||
func (s *ScimService) UpdateServiceProvider(ctx context.Context,
|
||||
serviceProviderID string,
|
||||
input *dto.ScimServiceProviderCreateDTO,
|
||||
) (model.ScimServiceProvider, error) {
|
||||
var provider model.ScimServiceProvider
|
||||
err := s.db.WithContext(ctx).
|
||||
First(&provider, "id = ?", serviceProviderID).
|
||||
Error
|
||||
if err != nil {
|
||||
return model.ScimServiceProvider{}, err
|
||||
}
|
||||
|
||||
provider.Endpoint = input.Endpoint
|
||||
provider.Token = datatype.EncryptedString(input.Token)
|
||||
provider.OidcClientID = input.OidcClientID
|
||||
|
||||
if err := s.db.WithContext(ctx).Save(&provider).Error; err != nil {
|
||||
return model.ScimServiceProvider{}, err
|
||||
}
|
||||
|
||||
return provider, nil
|
||||
}
|
||||
|
||||
func (s *ScimService) DeleteServiceProvider(ctx context.Context, serviceProviderID string) error {
|
||||
return s.db.WithContext(ctx).
|
||||
Delete(&model.ScimServiceProvider{}, "id = ?", serviceProviderID).
|
||||
Error
|
||||
}
|
||||
|
||||
//nolint:contextcheck
|
||||
func (s *ScimService) ScheduleSync() {
|
||||
jobName := "ScheduledScimSync"
|
||||
start := time.Now().Add(5 * time.Minute)
|
||||
|
||||
_ = s.scheduler.RemoveJob(jobName)
|
||||
|
||||
err := s.scheduler.RegisterJob(
|
||||
context.Background(), jobName,
|
||||
gocron.OneTimeJob(gocron.OneTimeJobStartDateTime(start)), s.SyncAll, false)
|
||||
|
||||
if err != nil {
|
||||
slog.Error("Failed to schedule SCIM sync", slog.Any("error", err))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ScimService) SyncAll(ctx context.Context) error {
|
||||
providers, err := s.ListServiceProviders(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var errs []error
|
||||
for _, provider := range providers {
|
||||
if ctx.Err() != nil {
|
||||
errs = append(errs, ctx.Err())
|
||||
break
|
||||
}
|
||||
if err := s.SyncServiceProvider(ctx, provider.ID); err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to sync SCIM provider %s: %w", provider.ID, err))
|
||||
}
|
||||
}
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
func (s *ScimService) SyncServiceProvider(ctx context.Context, serviceProviderID string) error {
|
||||
start := time.Now()
|
||||
provider, err := s.GetServiceProvider(ctx, serviceProviderID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
slog.InfoContext(ctx, "Syncing SCIM service provider",
|
||||
slog.String("provider_id", provider.ID),
|
||||
slog.String("oidc_client_id", provider.OidcClientID),
|
||||
)
|
||||
|
||||
allowedGroupIDs := groupIDs(provider.OidcClient.AllowedUserGroups)
|
||||
|
||||
// Load users and groups that should be synced to the SCIM provider
|
||||
groups, err := s.groupsForClient(ctx, provider.OidcClient, allowedGroupIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
users, err := s.usersForClient(ctx, provider.OidcClient, allowedGroupIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Load users and groups that already exist in the SCIM provider
|
||||
userResources, err := listScimResources[dto.ScimUser](s, ctx, provider, "/Users")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
groupResources, err := listScimResources[dto.ScimGroup](s, ctx, provider, "/Groups")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var errs []error
|
||||
var userStats scimSyncStats
|
||||
var groupStats scimSyncStats
|
||||
|
||||
// Sync users first, so that groups can reference them
|
||||
if stats, err := s.syncUsers(ctx, provider, users, &userResources); err != nil {
|
||||
errs = append(errs, err)
|
||||
userStats = stats
|
||||
} else {
|
||||
userStats = stats
|
||||
}
|
||||
|
||||
stats, err := s.syncGroups(ctx, provider, groups, groupResources.Resources, userResources.Resources)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
groupStats = stats
|
||||
} else {
|
||||
groupStats = stats
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
slog.WarnContext(ctx, "SCIM sync completed with errors",
|
||||
slog.String("provider_id", provider.ID),
|
||||
slog.Int("error_count", len(errs)),
|
||||
slog.Int("users_created", userStats.Created),
|
||||
slog.Int("users_updated", userStats.Updated),
|
||||
slog.Int("users_deleted", userStats.Deleted),
|
||||
slog.Int("groups_created", groupStats.Created),
|
||||
slog.Int("groups_updated", groupStats.Updated),
|
||||
slog.Int("groups_deleted", groupStats.Deleted),
|
||||
slog.Duration("duration", time.Since(start)),
|
||||
)
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
provider.LastSyncedAt = utils.Ptr(datatype.DateTime(time.Now()))
|
||||
if err := s.db.WithContext(ctx).Save(&provider).Error; err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
slog.InfoContext(ctx, "SCIM sync completed",
|
||||
slog.String("provider_id", provider.ID),
|
||||
slog.Int("users_created", userStats.Created),
|
||||
slog.Int("users_updated", userStats.Updated),
|
||||
slog.Int("users_deleted", userStats.Deleted),
|
||||
slog.Int("groups_created", groupStats.Created),
|
||||
slog.Int("groups_updated", groupStats.Updated),
|
||||
slog.Int("groups_deleted", groupStats.Deleted),
|
||||
slog.Duration("duration", time.Since(start)),
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *ScimService) syncUsers(
|
||||
ctx context.Context,
|
||||
provider model.ScimServiceProvider,
|
||||
users []model.User,
|
||||
resourceList *dto.ScimListResponse[dto.ScimUser],
|
||||
) (stats scimSyncStats, err error) {
|
||||
var errs []error
|
||||
|
||||
// Update or create users
|
||||
for _, u := range users {
|
||||
existing := getResourceByExternalID[dto.ScimUser](u.ID, resourceList.Resources)
|
||||
|
||||
action, created, err := s.syncUser(ctx, provider, u, existing)
|
||||
if created != nil && existing == nil {
|
||||
resourceList.Resources = append(resourceList.Resources, *created)
|
||||
}
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Update stats based on action taken by syncUser
|
||||
switch action {
|
||||
case scimActionCreated:
|
||||
stats.Created++
|
||||
case scimActionUpdated:
|
||||
stats.Updated++
|
||||
case scimActionDeleted:
|
||||
stats.Deleted++
|
||||
case scimActionNone:
|
||||
}
|
||||
}
|
||||
|
||||
// Delete users that are present in SCIM provider but not locally.
|
||||
userSet := make(map[string]struct{})
|
||||
for _, u := range users {
|
||||
userSet[u.ID] = struct{}{}
|
||||
}
|
||||
|
||||
for _, r := range resourceList.Resources {
|
||||
if _, ok := userSet[r.ExternalID]; !ok {
|
||||
if err := s.deleteScimResource(ctx, provider, "/Users/"+url.PathEscape(r.ID)); err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
stats.Deleted++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return stats, errors.Join(errs...)
|
||||
}
|
||||
|
||||
func (s *ScimService) syncGroups(
|
||||
ctx context.Context,
|
||||
provider model.ScimServiceProvider,
|
||||
groups []model.UserGroup,
|
||||
remoteGroups []dto.ScimGroup,
|
||||
userResources []dto.ScimUser,
|
||||
) (stats scimSyncStats, err error) {
|
||||
var errs []error
|
||||
|
||||
// Update or create groups
|
||||
for _, g := range groups {
|
||||
existing := getResourceByExternalID[dto.ScimGroup](g.ID, remoteGroups)
|
||||
|
||||
action, err := s.syncGroup(ctx, provider, g, existing, userResources)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Update stats based on action taken by syncGroup
|
||||
switch action {
|
||||
case scimActionCreated:
|
||||
stats.Created++
|
||||
case scimActionUpdated:
|
||||
stats.Updated++
|
||||
case scimActionDeleted:
|
||||
stats.Deleted++
|
||||
case scimActionNone:
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Delete groups that are present in SCIM provider but not locally
|
||||
groupSet := make(map[string]struct{})
|
||||
for _, g := range groups {
|
||||
groupSet[g.ID] = struct{}{}
|
||||
}
|
||||
|
||||
for _, r := range remoteGroups {
|
||||
if _, ok := groupSet[r.ExternalID]; !ok {
|
||||
if err := s.deleteScimResource(ctx, provider, "/Groups/"+url.PathEscape(r.GetID())); err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
stats.Deleted++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return stats, errors.Join(errs...)
|
||||
}
|
||||
|
||||
func (s *ScimService) syncUser(ctx context.Context,
|
||||
provider model.ScimServiceProvider,
|
||||
user model.User,
|
||||
userResource *dto.ScimUser,
|
||||
) (scimSyncAction, *dto.ScimUser, error) {
|
||||
// If user is not allowed for the client, delete it from SCIM provider
|
||||
if userResource != nil && !IsUserGroupAllowedToAuthorize(user, provider.OidcClient) {
|
||||
return scimActionDeleted, nil, s.deleteScimResource(ctx, provider, fmt.Sprintf("/Users/%s", url.PathEscape(userResource.ID)))
|
||||
}
|
||||
|
||||
payload := dto.ScimUser{
|
||||
ScimResourceData: dto.ScimResourceData{
|
||||
Schemas: []string{scimUserSchema},
|
||||
ExternalID: user.ID,
|
||||
},
|
||||
UserName: user.Username,
|
||||
Name: &dto.ScimName{
|
||||
GivenName: user.FirstName,
|
||||
FamilyName: user.LastName,
|
||||
},
|
||||
Display: user.DisplayName,
|
||||
Active: !user.Disabled,
|
||||
}
|
||||
|
||||
if user.Email != nil {
|
||||
payload.Emails = []dto.ScimEmail{{
|
||||
Value: *user.Email,
|
||||
Primary: true,
|
||||
}}
|
||||
}
|
||||
|
||||
// If the user exists on the SCIM provider, and it has been modified, update it
|
||||
if userResource != nil {
|
||||
if user.LastModified().Before(userResource.GetMeta().LastModified) {
|
||||
return scimActionNone, nil, nil
|
||||
}
|
||||
path := fmt.Sprintf("/Users/%s", url.PathEscape(userResource.GetID()))
|
||||
userResource, err := updateScimResource(s, ctx, provider, path, payload)
|
||||
if err != nil {
|
||||
return scimActionNone, nil, err
|
||||
}
|
||||
return scimActionUpdated, userResource, nil
|
||||
}
|
||||
|
||||
// Otherwise, create a new SCIM user
|
||||
userResource, err := createScimResource(s, ctx, provider, "/Users", payload)
|
||||
if err != nil {
|
||||
return scimActionNone, nil, err
|
||||
}
|
||||
|
||||
return scimActionCreated, userResource, nil
|
||||
}
|
||||
|
||||
func (s *ScimService) syncGroup(
|
||||
ctx context.Context,
|
||||
provider model.ScimServiceProvider,
|
||||
group model.UserGroup,
|
||||
groupResource *dto.ScimGroup,
|
||||
userResources []dto.ScimUser,
|
||||
) (scimSyncAction, error) {
|
||||
// If group is not allowed for the client, delete it from SCIM provider
|
||||
if groupResource != nil && !groupAllowedForClient(group.ID, provider.OidcClient) {
|
||||
return scimActionDeleted, s.deleteScimResource(ctx, provider, fmt.Sprintf("/Groups/%s", url.PathEscape(groupResource.GetID())))
|
||||
}
|
||||
|
||||
// Prepare group members
|
||||
members := make([]dto.ScimGroupMember, len(group.Users))
|
||||
for i, user := range group.Users {
|
||||
userResource := getResourceByExternalID[dto.ScimUser](user.ID, userResources)
|
||||
if userResource == nil {
|
||||
// Groups depend on user IDs already being provisioned
|
||||
return scimActionNone, fmt.Errorf("cannot sync group %s: user %s is not provisioned in SCIM provider", group.ID, user.ID)
|
||||
}
|
||||
|
||||
members[i] = dto.ScimGroupMember{
|
||||
Value: userResource.GetID(),
|
||||
}
|
||||
}
|
||||
|
||||
groupPayload := dto.ScimGroup{
|
||||
ScimResourceData: dto.ScimResourceData{
|
||||
Schemas: []string{scimGroupSchema},
|
||||
ExternalID: group.ID,
|
||||
},
|
||||
Display: group.FriendlyName,
|
||||
Members: members,
|
||||
}
|
||||
|
||||
// If the group exists on the SCIM provider, and it has been modified, update it
|
||||
if groupResource != nil {
|
||||
if group.LastModified().Before(groupResource.GetMeta().LastModified) {
|
||||
return scimActionNone, nil
|
||||
}
|
||||
path := fmt.Sprintf("/Groups/%s", url.PathEscape(groupResource.GetID()))
|
||||
_, err := updateScimResource(s, ctx, provider, path, groupPayload)
|
||||
if err != nil {
|
||||
return scimActionNone, err
|
||||
}
|
||||
return scimActionUpdated, nil
|
||||
}
|
||||
|
||||
// Otherwise, create a new SCIM group
|
||||
_, err := createScimResource(s, ctx, provider, "/Groups", groupPayload)
|
||||
if err != nil {
|
||||
return scimActionNone, err
|
||||
}
|
||||
|
||||
return scimActionCreated, nil
|
||||
}
|
||||
|
||||
func groupAllowedForClient(groupID string, client model.OidcClient) bool {
|
||||
if !client.IsGroupRestricted {
|
||||
return true
|
||||
}
|
||||
|
||||
for _, allowedGroup := range client.AllowedUserGroups {
|
||||
if allowedGroup.ID == groupID {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func groupIDs(groups []model.UserGroup) []string {
|
||||
ids := make([]string, len(groups))
|
||||
for i, g := range groups {
|
||||
ids[i] = g.ID
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
func (s *ScimService) groupsForClient(
|
||||
ctx context.Context,
|
||||
client model.OidcClient,
|
||||
allowedGroupIDs []string,
|
||||
) ([]model.UserGroup, error) {
|
||||
var groups []model.UserGroup
|
||||
|
||||
query := s.db.WithContext(ctx).Preload("Users").Model(&model.UserGroup{})
|
||||
if client.IsGroupRestricted {
|
||||
if len(allowedGroupIDs) == 0 {
|
||||
return groups, nil
|
||||
}
|
||||
query = query.Where("id IN ?", allowedGroupIDs)
|
||||
}
|
||||
|
||||
if err := query.Find(&groups).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return groups, nil
|
||||
}
|
||||
|
||||
func (s *ScimService) usersForClient(
|
||||
ctx context.Context,
|
||||
client model.OidcClient,
|
||||
allowedGroupIDs []string,
|
||||
) ([]model.User, error) {
|
||||
var users []model.User
|
||||
|
||||
query := s.db.WithContext(ctx).Model(&model.User{})
|
||||
if client.IsGroupRestricted {
|
||||
if len(allowedGroupIDs) == 0 {
|
||||
return users, nil
|
||||
}
|
||||
query = query.
|
||||
Joins("JOIN user_groups_users ON users.id = user_groups_users.user_id").
|
||||
Where("user_groups_users.user_group_id IN ?", allowedGroupIDs).
|
||||
Select("users.*").
|
||||
Distinct()
|
||||
}
|
||||
|
||||
query = query.Preload("UserGroups")
|
||||
|
||||
if err := query.Find(&users).Error; err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return users, nil
|
||||
}
|
||||
|
||||
func getResourceByExternalID[T dto.ScimResource](externalID string, resource []T) *T {
|
||||
for i := range resource {
|
||||
if resource[i].GetExternalID() == externalID {
|
||||
return &resource[i]
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func listScimResources[T any](
|
||||
s *ScimService,
|
||||
ctx context.Context,
|
||||
provider model.ScimServiceProvider,
|
||||
path string,
|
||||
) (result dto.ScimListResponse[T], err error) {
|
||||
startIndex := 1
|
||||
count := 1000
|
||||
|
||||
for {
|
||||
// Use SCIM pagination to avoid missing resources on large providers
|
||||
queryParams := map[string]string{
|
||||
"startIndex": strconv.Itoa(startIndex),
|
||||
"count": strconv.Itoa(count),
|
||||
}
|
||||
|
||||
resp, err := s.scimRequest(ctx, provider, http.MethodGet, path, nil, queryParams)
|
||||
if err != nil {
|
||||
return dto.ScimListResponse[T]{}, err
|
||||
}
|
||||
|
||||
if err := ensureScimStatus(ctx, resp, provider, http.StatusOK); err != nil {
|
||||
return dto.ScimListResponse[T]{}, err
|
||||
}
|
||||
|
||||
var page dto.ScimListResponse[T]
|
||||
if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
|
||||
return dto.ScimListResponse[T]{}, fmt.Errorf("failed to decode SCIM list response: %w", err)
|
||||
}
|
||||
|
||||
resp.Body.Close()
|
||||
|
||||
// Initialize metadata only once
|
||||
if result.TotalResults == 0 {
|
||||
result.TotalResults = page.TotalResults
|
||||
}
|
||||
|
||||
result.Resources = append(result.Resources, page.Resources...)
|
||||
|
||||
// If we've fetched everything, stop
|
||||
if len(result.Resources) >= page.TotalResults || len(page.Resources) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
startIndex += page.ItemsPerPage
|
||||
}
|
||||
|
||||
result.ItemsPerPage = len(result.Resources)
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func createScimResource[T dto.ScimResource](
|
||||
s *ScimService,
|
||||
ctx context.Context,
|
||||
provider model.ScimServiceProvider,
|
||||
path string, payload T) (*T, error) {
|
||||
resp, err := s.scimRequest(ctx, provider, http.MethodPost, path, payload, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err := ensureScimStatus(ctx, resp, provider, http.StatusOK, http.StatusCreated); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var resource T
|
||||
if err := json.NewDecoder(resp.Body).Decode(&resource); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode SCIM create response: %w", err)
|
||||
}
|
||||
|
||||
return &resource, nil
|
||||
}
|
||||
|
||||
func updateScimResource[T dto.ScimResource](
|
||||
s *ScimService,
|
||||
ctx context.Context,
|
||||
provider model.ScimServiceProvider,
|
||||
path string,
|
||||
payload T,
|
||||
) (*T, error) {
|
||||
resp, err := s.scimRequest(ctx, provider, http.MethodPut, path, payload, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err := ensureScimStatus(ctx, resp, provider, http.StatusOK, http.StatusCreated); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var resource T
|
||||
if err := json.NewDecoder(resp.Body).Decode(&resource); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode SCIM update response: %w", err)
|
||||
}
|
||||
|
||||
return &resource, nil
|
||||
}
|
||||
|
||||
func (s *ScimService) deleteScimResource(ctx context.Context, provider model.ScimServiceProvider, path string) error {
|
||||
resp, err := s.scimRequest(ctx, provider, http.MethodDelete, path, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return nil
|
||||
}
|
||||
|
||||
return ensureScimStatus(ctx, resp, provider, http.StatusOK, http.StatusNoContent)
|
||||
}
|
||||
|
||||
func (s *ScimService) scimRequest(
|
||||
ctx context.Context,
|
||||
provider model.ScimServiceProvider,
|
||||
method,
|
||||
path string,
|
||||
payload any,
|
||||
queryParams map[string]string,
|
||||
) (*http.Response, error) {
|
||||
urlString, err := scimURL(provider.Endpoint, path, queryParams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var bodyBytes []byte
|
||||
if payload != nil {
|
||||
encoded, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to encode SCIM payload: %w", err)
|
||||
}
|
||||
bodyBytes = encoded
|
||||
}
|
||||
|
||||
retryAttempts := 3
|
||||
for attempt := 1; attempt <= retryAttempts; attempt++ {
|
||||
var body io.Reader
|
||||
if bodyBytes != nil {
|
||||
body = bytes.NewReader(bodyBytes)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, method, urlString, body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.Header.Set("Accept", scimContentType)
|
||||
if payload != nil {
|
||||
req.Header.Set("Content-Type", scimContentType)
|
||||
}
|
||||
token := string(provider.Token)
|
||||
if token != "" {
|
||||
req.Header.Set("Authorization", "Bearer "+token)
|
||||
}
|
||||
|
||||
slog.Debug("Sending SCIM request",
|
||||
slog.String("method", method),
|
||||
slog.String("url", urlString),
|
||||
slog.String("provider_id", provider.ID),
|
||||
)
|
||||
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Only retry on 429 to avoid masking other errors
|
||||
if resp.StatusCode != http.StatusTooManyRequests || attempt == retryAttempts {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
retryDelay := scimRetryDelay(resp.Header.Get("Retry-After"), attempt)
|
||||
slog.WarnContext(ctx, "SCIM provider rate-limited, retrying",
|
||||
slog.String("provider_id", provider.ID),
|
||||
slog.String("method", method),
|
||||
slog.String("url", urlString),
|
||||
slog.Int("attempt", attempt),
|
||||
slog.Duration("retry_after", retryDelay),
|
||||
)
|
||||
|
||||
resp.Body.Close()
|
||||
if err := utils.SleepWithContext(ctx, retryDelay); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("scim request retry attempts exceeded")
|
||||
}
|
||||
|
||||
func scimRetryDelay(retryAfter string, attempt int) time.Duration {
	// Respect Retry-After when provided
	if retryAfter != "" {
		if seconds, err := strconv.Atoi(retryAfter); err == nil {
			return time.Duration(seconds) * time.Second
		}
		if t, err := http.ParseTime(retryAfter); err == nil {
			if delay := time.Until(t); delay > 0 {
				return delay
			}
		}
	}

	// Exponential backoff otherwise
	maxDelay := 10 * time.Second
	delay := 500 * time.Millisecond * (time.Duration(1) << (attempt - 1)) //nolint:gosec // attempt is bounded 1-3
	if delay > maxDelay {
		return maxDelay
	}
	return delay
}
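For reference, a quick sketch of what the backoff above works out to; the values follow directly from the function, the snippet itself is only illustrative:

```go
// Illustrative values only.
fmt.Println(scimRetryDelay("", 1))  // 500ms (500ms << 0)
fmt.Println(scimRetryDelay("", 2))  // 1s    (500ms << 1)
fmt.Println(scimRetryDelay("", 3))  // 2s    (500ms << 2)
fmt.Println(scimRetryDelay("3", 2)) // 3s    (Retry-After header takes precedence)
```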

func scimURL(endpoint, p string, queryParams map[string]string) (string, error) {
	u, err := url.Parse(endpoint)
	if err != nil {
		return "", fmt.Errorf("invalid scim endpoint: %w", err)
	}

	u.Path = path.Join(strings.TrimRight(u.Path, "/"), p)

	q := u.Query()
	for key, value := range queryParams {
		q.Set(key, value)
	}
	u.RawQuery = q.Encode()

	return u.String(), nil
}
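A short illustration of the URL construction; the endpoint is made up, the joining and query encoding follow the function above:

```go
// Hypothetical endpoint and parameters, purely for illustration.
u, err := scimURL("https://scim.example.com/v2/", "/Users", map[string]string{
	"startIndex": "1",
	"count":      "1000",
})
// err == nil
// u == "https://scim.example.com/v2/Users?count=1000&startIndex=1"
```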

func ensureScimStatus(
	ctx context.Context,
	resp *http.Response,
	provider model.ScimServiceProvider,
	allowedStatuses ...int) error {
	for _, status := range allowedStatuses {
		if resp.StatusCode == status {
			return nil
		}
	}

	body := readScimErrorBody(resp.Body)

	slog.ErrorContext(ctx, "SCIM request failed",
		slog.String("provider_id", provider.ID),
		slog.String("method", resp.Request.Method),
		slog.String("url", resp.Request.URL.String()),
		slog.Int("status", resp.StatusCode),
		slog.String("response_body", body),
	)

	return fmt.Errorf("scim request failed with status %d: %s", resp.StatusCode, body)
}

func readScimErrorBody(body io.Reader) string {
	payload, err := io.ReadAll(io.LimitReader(body, scimErrorBodyLimit))
	if err != nil {
		return ""
	}
	return strings.TrimSpace(string(payload))
}
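That is the end of the new scim_service.go. As a hedged sketch of how the pieces shown above might be wired together, with `db` and `scheduler` as placeholders rather than the project's actual bootstrap code:

```go
// Hypothetical wiring, for illustration only: construct the service with a
// custom HTTP client and trigger a one-off sync of every configured provider.
scimService := NewScimService(db, scheduler, &http.Client{Timeout: 30 * time.Second})

// SyncAll iterates all SCIM service providers; per-provider failures are
// collected with errors.Join rather than aborting the whole run.
if err := scimService.SyncAll(context.Background()); err != nil {
	slog.Error("SCIM sync finished with errors", slog.Any("error", err))
}
```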
@@ -3,7 +3,9 @@ package service
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/pocket-id/pocket-id/backend/internal/common"
|
||||
@@ -14,14 +16,15 @@ import (
|
||||
|
||||
type UserGroupService struct {
|
||||
db *gorm.DB
|
||||
scimService *ScimService
|
||||
appConfigService *AppConfigService
|
||||
}
|
||||
|
||||
func NewUserGroupService(db *gorm.DB, appConfigService *AppConfigService) *UserGroupService {
|
||||
return &UserGroupService{db: db, appConfigService: appConfigService}
|
||||
func NewUserGroupService(db *gorm.DB, appConfigService *AppConfigService, scimService *ScimService) *UserGroupService {
|
||||
return &UserGroupService{db: db, appConfigService: appConfigService, scimService: scimService}
|
||||
}
|
||||
|
||||
func (s *UserGroupService) List(ctx context.Context, name string, sortedPaginationRequest utils.SortedPaginationRequest) (groups []model.UserGroup, response utils.PaginationResponse, err error) {
|
||||
func (s *UserGroupService) List(ctx context.Context, name string, listRequestOptions utils.ListRequestOptions) (groups []model.UserGroup, response utils.PaginationResponse, err error) {
|
||||
query := s.db.
|
||||
WithContext(ctx).
|
||||
Preload("CustomClaims").
|
||||
@@ -32,17 +35,14 @@ func (s *UserGroupService) List(ctx context.Context, name string, sortedPaginati
|
||||
}
|
||||
|
||||
// As userCount is not a column we need to manually sort it
|
||||
if sortedPaginationRequest.Sort.Column == "userCount" && utils.IsValidSortDirection(sortedPaginationRequest.Sort.Direction) {
|
||||
if listRequestOptions.Sort.Column == "userCount" && utils.IsValidSortDirection(listRequestOptions.Sort.Direction) {
|
||||
query = query.Select("user_groups.*, COUNT(user_groups_users.user_id)").
|
||||
Joins("LEFT JOIN user_groups_users ON user_groups.id = user_groups_users.user_group_id").
|
||||
Group("user_groups.id").
|
||||
Order("COUNT(user_groups_users.user_id) " + sortedPaginationRequest.Sort.Direction)
|
||||
|
||||
response, err := utils.Paginate(sortedPaginationRequest.Pagination.Page, sortedPaginationRequest.Pagination.Limit, query, &groups)
|
||||
return groups, response, err
|
||||
Order("COUNT(user_groups_users.user_id) " + listRequestOptions.Sort.Direction)
|
||||
}
|
||||
|
||||
response, err = utils.PaginateAndSort(sortedPaginationRequest, query, &groups)
|
||||
response, err = utils.PaginateFilterAndSort(listRequestOptions, query, &groups)
|
||||
return groups, response, err
|
||||
}
|
||||
|
||||
@@ -56,6 +56,7 @@ func (s *UserGroupService) getInternal(ctx context.Context, id string, tx *gorm.
|
||||
Where("id = ?", id).
|
||||
Preload("CustomClaims").
|
||||
Preload("Users").
|
||||
Preload("AllowedOidcClients").
|
||||
First(&group).
|
||||
Error
|
||||
return group, err
|
||||
@@ -90,7 +91,13 @@ func (s *UserGroupService) Delete(ctx context.Context, id string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
return tx.Commit().Error
|
||||
err = tx.Commit().Error
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.scimService.ScheduleSync()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *UserGroupService) Create(ctx context.Context, input dto.UserGroupCreateDto) (group model.UserGroup, err error) {
|
||||
@@ -118,6 +125,8 @@ func (s *UserGroupService) createInternal(ctx context.Context, input dto.UserGro
|
||||
}
|
||||
return model.UserGroup{}, err
|
||||
}
|
||||
|
||||
s.scimService.ScheduleSync()
|
||||
return group, nil
|
||||
}
|
||||
|
||||
@@ -153,6 +162,7 @@ func (s *UserGroupService) updateInternal(ctx context.Context, id string, input
|
||||
|
||||
group.Name = input.Name
|
||||
group.FriendlyName = input.FriendlyName
|
||||
group.UpdatedAt = utils.Ptr(datatype.DateTime(time.Now()))
|
||||
|
||||
err = tx.
|
||||
WithContext(ctx).
|
||||
@@ -164,6 +174,8 @@ func (s *UserGroupService) updateInternal(ctx context.Context, id string, input
|
||||
} else if err != nil {
|
||||
return model.UserGroup{}, err
|
||||
}
|
||||
|
||||
s.scimService.ScheduleSync()
|
||||
return group, nil
|
||||
}
|
||||
|
||||
@@ -216,6 +228,8 @@ func (s *UserGroupService) updateUsersInternal(ctx context.Context, id string, u
|
||||
}
|
||||
|
||||
// Save the updated group
|
||||
group.UpdatedAt = utils.Ptr(datatype.DateTime(time.Now()))
|
||||
|
||||
err = tx.
|
||||
WithContext(ctx).
|
||||
Save(&group).
|
||||
@@ -224,6 +238,7 @@ func (s *UserGroupService) updateUsersInternal(ctx context.Context, id string, u
|
||||
return model.UserGroup{}, err
|
||||
}
|
||||
|
||||
s.scimService.ScheduleSync()
|
||||
return group, nil
|
||||
}
|
||||
|
||||
@@ -251,3 +266,55 @@ func (s *UserGroupService) GetUserCountOfGroup(ctx context.Context, id string) (
|
||||
Count()
|
||||
return count, nil
|
||||
}
|
||||
|
||||
func (s *UserGroupService) UpdateAllowedOidcClient(ctx context.Context, id string, input dto.UserGroupUpdateAllowedOidcClientsDto) (group model.UserGroup, err error) {
|
||||
tx := s.db.Begin()
|
||||
defer func() {
|
||||
tx.Rollback()
|
||||
}()
|
||||
|
||||
group, err = s.getInternal(ctx, id, tx)
|
||||
if err != nil {
|
||||
return model.UserGroup{}, err
|
||||
}
|
||||
|
||||
// Fetch the clients based on the client IDs
|
||||
var clients []model.OidcClient
|
||||
if len(input.OidcClientIDs) > 0 {
|
||||
err = tx.
|
||||
WithContext(ctx).
|
||||
Where("id IN (?)", input.OidcClientIDs).
|
||||
Find(&clients).
|
||||
Error
|
||||
if err != nil {
|
||||
return model.UserGroup{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// Replace the current clients with the new set of clients
|
||||
err = tx.
|
||||
WithContext(ctx).
|
||||
Model(&group).
|
||||
Association("AllowedOidcClients").
|
||||
Replace(clients)
|
||||
if err != nil {
|
||||
return model.UserGroup{}, err
|
||||
}
|
||||
|
||||
// Save the updated group
|
||||
err = tx.
|
||||
WithContext(ctx).
|
||||
Save(&group).
|
||||
Error
|
||||
if err != nil {
|
||||
return model.UserGroup{}, err
|
||||
}
|
||||
|
||||
err = tx.Commit().Error
|
||||
if err != nil {
|
||||
return model.UserGroup{}, err
|
||||
}
|
||||
|
||||
s.scimService.ScheduleSync()
|
||||
return group, nil
|
||||
}
|
||||
|
||||
@@ -7,20 +7,23 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log/slog"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
|
||||
"github.com/pocket-id/pocket-id/backend/internal/common"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/dto"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/model"
|
||||
datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/storage"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/utils"
|
||||
"github.com/pocket-id/pocket-id/backend/internal/utils/email"
|
||||
profilepicture "github.com/pocket-id/pocket-id/backend/internal/utils/image"
|
||||
@@ -33,9 +36,12 @@ type UserService struct {
|
||||
emailService *EmailService
|
||||
appConfigService *AppConfigService
|
||||
customClaimService *CustomClaimService
|
||||
appImagesService *AppImagesService
|
||||
scimService *ScimService
|
||||
fileStorage storage.FileStorage
|
||||
}
|
||||
|
||||
func NewUserService(db *gorm.DB, jwtService *JwtService, auditLogService *AuditLogService, emailService *EmailService, appConfigService *AppConfigService, customClaimService *CustomClaimService) *UserService {
|
||||
func NewUserService(db *gorm.DB, jwtService *JwtService, auditLogService *AuditLogService, emailService *EmailService, appConfigService *AppConfigService, customClaimService *CustomClaimService, appImagesService *AppImagesService, scimService *ScimService, fileStorage storage.FileStorage) *UserService {
|
||||
return &UserService{
|
||||
db: db,
|
||||
jwtService: jwtService,
|
||||
@@ -43,10 +49,13 @@ func NewUserService(db *gorm.DB, jwtService *JwtService, auditLogService *AuditL
|
||||
emailService: emailService,
|
||||
appConfigService: appConfigService,
|
||||
customClaimService: customClaimService,
|
||||
appImagesService: appImagesService,
|
||||
scimService: scimService,
|
||||
fileStorage: fileStorage,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *UserService) ListUsers(ctx context.Context, searchTerm string, sortedPaginationRequest utils.SortedPaginationRequest) ([]model.User, utils.PaginationResponse, error) {
|
||||
func (s *UserService) ListUsers(ctx context.Context, searchTerm string, listRequestOptions utils.ListRequestOptions) ([]model.User, utils.PaginationResponse, error) {
|
||||
var users []model.User
|
||||
query := s.db.WithContext(ctx).
|
||||
Model(&model.User{}).
|
||||
@@ -60,7 +69,7 @@ func (s *UserService) ListUsers(ctx context.Context, searchTerm string, sortedPa
|
||||
searchPattern, searchPattern, searchPattern, searchPattern)
|
||||
}
|
||||
|
||||
pagination, err := utils.PaginateAndSort(sortedPaginationRequest, query, &users)
|
||||
pagination, err := utils.PaginateFilterAndSort(listRequestOptions, query, &users)
|
||||
|
||||
return users, pagination, err
|
||||
}
|
||||
@@ -87,39 +96,42 @@ func (s *UserService) GetProfilePicture(ctx context.Context, userID string) (io.
|
||||
return nil, 0, &common.InvalidUUIDError{}
|
||||
}
|
||||
|
||||
// First check for a custom uploaded profile picture (userID.png)
|
||||
profilePicturePath := common.EnvConfig.UploadPath + "/profile-pictures/" + userID + ".png"
|
||||
file, err := os.Open(profilePicturePath)
|
||||
if err == nil {
|
||||
// Get the file size
|
||||
fileInfo, err := file.Stat()
|
||||
if err != nil {
|
||||
file.Close()
|
||||
return nil, 0, err
|
||||
}
|
||||
return file, fileInfo.Size(), nil
|
||||
}
|
||||
|
||||
// If no custom picture exists, get the user's data for creating initials
|
||||
user, err := s.GetUser(ctx, userID)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
// Check if we have a cached default picture for these initials
|
||||
defaultProfilePicturesDir := common.EnvConfig.UploadPath + "/profile-pictures/defaults/"
|
||||
defaultPicturePath := defaultProfilePicturesDir + user.Initials() + ".png"
|
||||
file, err = os.Open(defaultPicturePath)
|
||||
profilePicturePath := path.Join("profile-pictures", userID+".png")
|
||||
|
||||
// Try custom profile picture
|
||||
file, size, err := s.fileStorage.Open(ctx, profilePicturePath)
|
||||
if err == nil {
|
||||
fileInfo, err := file.Stat()
|
||||
if err != nil {
|
||||
file.Close()
|
||||
return nil, 0, err
|
||||
}
|
||||
return file, fileInfo.Size(), nil
|
||||
return file, size, nil
|
||||
} else if !errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
// If no cached default picture exists, create one and save it for future use
|
||||
// Try default global profile picture
|
||||
if s.appImagesService.IsDefaultProfilePictureSet() {
|
||||
reader, size, _, err := s.appImagesService.GetImage(ctx, "default-profile-picture")
|
||||
if err == nil {
|
||||
return reader, size, nil
|
||||
}
|
||||
if !errors.Is(err, &common.ImageNotFoundError{}) {
|
||||
return nil, 0, err
|
||||
}
|
||||
}
|
||||
|
||||
// Try cached default for initials
|
||||
defaultPicturePath := path.Join("profile-pictures", "defaults", user.Initials()+".png")
|
||||
file, size, err = s.fileStorage.Open(ctx, defaultPicturePath)
|
||||
if err == nil {
|
||||
return file, size, nil
|
||||
} else if !errors.Is(err, fs.ErrNotExist) {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
// Create and return generated default with initials
|
||||
defaultPicture, err := profilepicture.CreateDefaultProfilePicture(user.Initials())
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
@@ -127,20 +139,16 @@ func (s *UserService) GetProfilePicture(ctx context.Context, userID string) (io.
|
||||
|
||||
// Save the default picture for future use (in a goroutine to avoid blocking)
|
||||
defaultPictureBytes := defaultPicture.Bytes()
|
||||
//nolint:contextcheck
|
||||
go func() {
|
||||
// Ensure the directory exists
|
||||
errInternal := os.MkdirAll(defaultProfilePicturesDir, os.ModePerm)
|
||||
if errInternal != nil {
|
||||
slog.Error("Failed to create directory for default profile picture", slog.Any("error", errInternal))
|
||||
return
|
||||
}
|
||||
errInternal = utils.SaveFileStream(bytes.NewReader(defaultPictureBytes), defaultPicturePath)
|
||||
if errInternal != nil {
|
||||
slog.Error("Failed to cache default profile picture for initials", slog.String("initials", user.Initials()), slog.Any("error", errInternal))
|
||||
// Use bytes.NewReader because we need an io.ReadSeeker
|
||||
rErr := s.fileStorage.Save(context.Background(), defaultPicturePath, bytes.NewReader(defaultPictureBytes))
|
||||
if rErr != nil {
|
||||
slog.Error("Failed to cache default profile picture", slog.String("initials", user.Initials()), slog.Any("error", rErr))
|
||||
}
|
||||
}()
|
||||
|
||||
return io.NopCloser(bytes.NewReader(defaultPictureBytes)), int64(defaultPicture.Len()), nil
|
||||
return io.NopCloser(bytes.NewReader(defaultPictureBytes)), int64(len(defaultPictureBytes)), nil
|
||||
}
|
||||
|
||||
func (s *UserService) GetUserGroups(ctx context.Context, userID string) ([]model.UserGroup, error) {
|
||||
@@ -157,7 +165,7 @@ func (s *UserService) GetUserGroups(ctx context.Context, userID string) ([]model
|
||||
return user.UserGroups, nil
|
||||
}
|
||||
|
||||
func (s *UserService) UpdateProfilePicture(userID string, file io.Reader) error {
|
||||
func (s *UserService) UpdateProfilePicture(ctx context.Context, userID string, file io.ReadSeeker) error {
|
||||
// Validate the user ID to prevent directory traversal
|
||||
err := uuid.Validate(userID)
|
||||
if err != nil {
|
||||
@@ -170,15 +178,8 @@ func (s *UserService) UpdateProfilePicture(userID string, file io.Reader) error
|
||||
return err
|
||||
}
|
||||
|
||||
// Ensure the directory exists
|
||||
profilePictureDir := common.EnvConfig.UploadPath + "/profile-pictures"
|
||||
err = os.MkdirAll(profilePictureDir, os.ModePerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create the profile picture file
|
||||
err = utils.SaveFileStream(profilePicture, profilePictureDir+"/"+userID+".png")
|
||||
profilePicturePath := path.Join("profile-pictures", userID+".png")
|
||||
err = s.fileStorage.Save(ctx, profilePicturePath, profilePicture)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -187,17 +188,30 @@ func (s *UserService) UpdateProfilePicture(userID string, file io.Reader) error
|
||||
}
|
||||
|
||||
func (s *UserService) DeleteUser(ctx context.Context, userID string, allowLdapDelete bool) error {
|
||||
return s.db.Transaction(func(tx *gorm.DB) error {
|
||||
return s.deleteUserInternal(ctx, userID, allowLdapDelete, tx)
|
||||
err := s.db.Transaction(func(tx *gorm.DB) error {
|
||||
return s.deleteUserInternal(ctx, tx, userID, allowLdapDelete)
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete user '%s': %w", userID, err)
|
||||
}
|
||||
|
||||
// Storage operations must be executed outside of a transaction
|
||||
profilePicturePath := path.Join("profile-pictures", userID+".png")
|
||||
err = s.fileStorage.Delete(ctx, profilePicturePath)
|
||||
if err != nil && !storage.IsNotExist(err) {
|
||||
return fmt.Errorf("failed to delete profile picture for user '%s': %w", userID, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *UserService) deleteUserInternal(ctx context.Context, userID string, allowLdapDelete bool, tx *gorm.DB) error {
|
||||
func (s *UserService) deleteUserInternal(ctx context.Context, tx *gorm.DB, userID string, allowLdapDelete bool) error {
|
||||
var user model.User
|
||||
|
||||
err := tx.
|
||||
WithContext(ctx).
|
||||
Where("id = ?", userID).
|
||||
Clauses(clause.Locking{Strength: "UPDATE"}).
|
||||
First(&user).
|
||||
Error
|
||||
if err != nil {
|
||||
@@ -209,18 +223,12 @@ func (s *UserService) deleteUserInternal(ctx context.Context, userID string, all
|
||||
return &common.LdapUserUpdateError{}
|
||||
}
|
||||
|
||||
// Delete the profile picture
|
||||
profilePicturePath := common.EnvConfig.UploadPath + "/profile-pictures/" + userID + ".png"
|
||||
err = os.Remove(profilePicturePath)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
err = tx.WithContext(ctx).Delete(&user).Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete user: %w", err)
|
||||
}
|
||||
|
||||
s.scimService.ScheduleSync()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -248,6 +256,18 @@ func (s *UserService) createUserInternal(ctx context.Context, input dto.UserCrea
|
||||
return model.User{}, &common.UserEmailNotSetError{}
|
||||
}
|
||||
|
||||
var userGroups []model.UserGroup
|
||||
if len(input.UserGroupIds) > 0 {
|
||||
err := tx.
|
||||
WithContext(ctx).
|
||||
Where("id IN ?", input.UserGroupIds).
|
||||
Find(&userGroups).
|
||||
Error
|
||||
if err != nil {
|
||||
return model.User{}, err
|
||||
}
|
||||
}
|
||||
|
||||
user := model.User{
|
||||
FirstName: input.FirstName,
|
||||
LastName: input.LastName,
|
||||
@@ -256,6 +276,8 @@ func (s *UserService) createUserInternal(ctx context.Context, input dto.UserCrea
|
||||
Username: input.Username,
|
||||
IsAdmin: input.IsAdmin,
|
||||
Locale: input.Locale,
|
||||
Disabled: input.Disabled,
|
||||
UserGroups: userGroups,
|
||||
}
|
||||
if input.LdapID != "" {
|
||||
user.LdapID = &input.LdapID
|
||||
@@ -279,42 +301,66 @@ func (s *UserService) createUserInternal(ctx context.Context, input dto.UserCrea
|
||||
|
||||
// Apply default groups and claims for new non-LDAP users
|
||||
if !isLdapSync {
|
||||
if err := s.applySignupDefaults(ctx, &user, tx); err != nil {
|
||||
if len(input.UserGroupIds) == 0 {
|
||||
if err := s.applyDefaultGroups(ctx, &user, tx); err != nil {
|
||||
return model.User{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.applyDefaultCustomClaims(ctx, &user, tx); err != nil {
|
||||
return model.User{}, err
|
||||
}
|
||||
}
|
||||
|
||||
s.scimService.ScheduleSync()
|
||||
return user, nil
|
||||
}
|
||||
|
||||
func (s *UserService) applySignupDefaults(ctx context.Context, user *model.User, tx *gorm.DB) error {
|
||||
func (s *UserService) applyDefaultGroups(ctx context.Context, user *model.User, tx *gorm.DB) error {
|
||||
config := s.appConfigService.GetDbConfig()
|
||||
|
||||
// Apply default user groups
|
||||
var groupIDs []string
|
||||
if v := config.SignupDefaultUserGroupIDs.Value; v != "" && v != "[]" {
|
||||
if err := json.Unmarshal([]byte(v), &groupIDs); err != nil {
|
||||
v := config.SignupDefaultUserGroupIDs.Value
|
||||
if v != "" && v != "[]" {
|
||||
err := json.Unmarshal([]byte(v), &groupIDs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid SignupDefaultUserGroupIDs JSON: %w", err)
|
||||
}
|
||||
if len(groupIDs) > 0 {
|
||||
var groups []model.UserGroup
|
||||
if err := tx.WithContext(ctx).Where("id IN ?", groupIDs).Find(&groups).Error; err != nil {
|
||||
err = tx.WithContext(ctx).
|
||||
Where("id IN ?", groupIDs).
|
||||
Find(&groups).
|
||||
Error
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to find default user groups: %w", err)
|
||||
}
|
||||
if err := tx.WithContext(ctx).Model(user).Association("UserGroups").Replace(groups); err != nil {
|
||||
|
||||
err = tx.WithContext(ctx).
|
||||
Model(user).
|
||||
Association("UserGroups").
|
||||
Replace(groups)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to associate default user groups: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *UserService) applyDefaultCustomClaims(ctx context.Context, user *model.User, tx *gorm.DB) error {
|
||||
config := s.appConfigService.GetDbConfig()
|
||||
|
||||
// Apply default custom claims
|
||||
var claims []dto.CustomClaimCreateDto
|
||||
if v := config.SignupDefaultCustomClaims.Value; v != "" && v != "[]" {
|
||||
if err := json.Unmarshal([]byte(v), &claims); err != nil {
|
||||
v := config.SignupDefaultCustomClaims.Value
|
||||
if v != "" && v != "[]" {
|
||||
err := json.Unmarshal([]byte(v), &claims)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid SignupDefaultCustomClaims JSON: %w", err)
|
||||
}
|
||||
if len(claims) > 0 {
|
||||
if _, err := s.customClaimService.updateCustomClaimsInternal(ctx, UserID, user.ID, claims, tx); err != nil {
|
||||
_, err = s.customClaimService.updateCustomClaimsInternal(ctx, UserID, user.ID, claims, tx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to apply default custom claims: %w", err)
|
||||
}
|
||||
}
|
||||
@@ -351,6 +397,7 @@ func (s *UserService) updateUserInternal(ctx context.Context, userID string, upd
|
||||
err := tx.
|
||||
WithContext(ctx).
|
||||
Where("id = ?", userID).
|
||||
Clauses(clause.Locking{Strength: "UPDATE"}).
|
||||
First(&user).
|
||||
Error
|
||||
if err != nil {
|
||||
@@ -383,6 +430,8 @@ func (s *UserService) updateUserInternal(ctx context.Context, userID string, upd
|
||||
}
|
||||
}
|
||||
|
||||
user.UpdatedAt = utils.Ptr(datatype.DateTime(time.Now()))
|
||||
|
||||
err = tx.
|
||||
WithContext(ctx).
|
||||
Save(&user).
|
||||
@@ -402,6 +451,7 @@ func (s *UserService) updateUserInternal(ctx context.Context, userID string, upd
|
||||
return user, err
|
||||
}
|
||||
|
||||
s.scimService.ScheduleSync()
|
||||
return user, nil
|
||||
}
|
||||
|
||||
@@ -411,30 +461,36 @@ func (s *UserService) RequestOneTimeAccessEmailAsAdmin(ctx context.Context, user
|
||||
return &common.OneTimeAccessDisabledError{}
|
||||
}
|
||||
|
||||
return s.requestOneTimeAccessEmailInternal(ctx, userID, "", ttl)
|
||||
_, err := s.requestOneTimeAccessEmailInternal(ctx, userID, "", ttl, true)
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *UserService) RequestOneTimeAccessEmailAsUnauthenticatedUser(ctx context.Context, userID, redirectPath string) error {
|
||||
func (s *UserService) RequestOneTimeAccessEmailAsUnauthenticatedUser(ctx context.Context, userID, redirectPath string) (string, error) {
|
||||
isDisabled := !s.appConfigService.GetDbConfig().EmailOneTimeAccessAsUnauthenticatedEnabled.IsTrue()
|
||||
if isDisabled {
|
||||
return &common.OneTimeAccessDisabledError{}
|
||||
return "", &common.OneTimeAccessDisabledError{}
|
||||
}
|
||||
|
||||
var userId string
|
||||
err := s.db.Model(&model.User{}).Select("id").Where("email = ?", userID).First(&userId).Error
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
// Do not return error if user not found to prevent email enumeration
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
return "", nil
|
||||
} else if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return s.requestOneTimeAccessEmailInternal(ctx, userId, redirectPath, 15*time.Minute)
|
||||
deviceToken, err := s.requestOneTimeAccessEmailInternal(ctx, userId, redirectPath, 15*time.Minute, true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
} else if deviceToken == nil {
|
||||
return "", errors.New("device token expected but not returned")
|
||||
}
|
||||
|
||||
return *deviceToken, nil
|
||||
}
|
||||
|
||||
func (s *UserService) requestOneTimeAccessEmailInternal(ctx context.Context, userID, redirectPath string, ttl time.Duration) error {
|
||||
func (s *UserService) requestOneTimeAccessEmailInternal(ctx context.Context, userID, redirectPath string, ttl time.Duration, withDeviceToken bool) (*string, error) {
|
||||
tx := s.db.Begin()
|
||||
defer func() {
|
||||
tx.Rollback()
|
||||
@@ -442,21 +498,20 @@ func (s *UserService) requestOneTimeAccessEmailInternal(ctx context.Context, use
|
||||
|
||||
user, err := s.GetUser(ctx, userID)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if user.Email == nil {
|
||||
return &common.UserEmailNotSetError{}
|
||||
return nil, &common.UserEmailNotSetError{}
|
||||
}
|
||||
|
||||
oneTimeAccessToken, err := s.createOneTimeAccessTokenInternal(ctx, user.ID, ttl, tx)
|
||||
oneTimeAccessToken, deviceToken, err := s.createOneTimeAccessTokenInternal(ctx, user.ID, ttl, withDeviceToken, tx)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = tx.Commit().Error
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We use a background context here as this is running in a goroutine
|
||||
@@ -489,28 +544,29 @@ func (s *UserService) requestOneTimeAccessEmailInternal(ctx context.Context, use
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
return deviceToken, nil
|
||||
}
|
||||
|
||||
func (s *UserService) CreateOneTimeAccessToken(ctx context.Context, userID string, ttl time.Duration) (string, error) {
|
||||
return s.createOneTimeAccessTokenInternal(ctx, userID, ttl, s.db)
|
||||
func (s *UserService) CreateOneTimeAccessToken(ctx context.Context, userID string, ttl time.Duration) (token string, err error) {
|
||||
token, _, err = s.createOneTimeAccessTokenInternal(ctx, userID, ttl, false, s.db)
|
||||
return token, err
|
||||
}
|
||||
|
||||
func (s *UserService) createOneTimeAccessTokenInternal(ctx context.Context, userID string, ttl time.Duration, tx *gorm.DB) (string, error) {
|
||||
oneTimeAccessToken, err := NewOneTimeAccessToken(userID, ttl)
|
||||
func (s *UserService) createOneTimeAccessTokenInternal(ctx context.Context, userID string, ttl time.Duration, withDeviceToken bool, tx *gorm.DB) (token string, deviceToken *string, err error) {
|
||||
oneTimeAccessToken, err := NewOneTimeAccessToken(userID, ttl, withDeviceToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
err = tx.WithContext(ctx).Create(oneTimeAccessToken).Error
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
return oneTimeAccessToken.Token, nil
|
||||
return oneTimeAccessToken.Token, oneTimeAccessToken.DeviceToken, nil
|
||||
}
|
||||
|
||||
func (s *UserService) ExchangeOneTimeAccessToken(ctx context.Context, token string, ipAddress, userAgent string) (model.User, string, error) {
|
||||
func (s *UserService) ExchangeOneTimeAccessToken(ctx context.Context, token, deviceToken, ipAddress, userAgent string) (model.User, string, error) {
|
||||
tx := s.db.Begin()
|
||||
defer func() {
|
||||
tx.Rollback()
|
||||
@@ -519,7 +575,9 @@ func (s *UserService) ExchangeOneTimeAccessToken(ctx context.Context, token stri
	var oneTimeAccessToken model.OneTimeAccessToken
	err := tx.
		WithContext(ctx).
		Where("token = ? AND expires_at > ?", token, datatype.DateTime(time.Now())).Preload("User").
		Where("token = ? AND expires_at > ?", token, datatype.DateTime(time.Now())).
		Preload("User").
		Clauses(clause.Locking{Strength: "UPDATE"}).
		First(&oneTimeAccessToken).
		Error
	if err != nil {
@@ -528,6 +586,10 @@ func (s *UserService) ExchangeOneTimeAccessToken(ctx context.Context, token stri
		}
		return model.User{}, "", err
	}
	if oneTimeAccessToken.DeviceToken != nil && deviceToken != *oneTimeAccessToken.DeviceToken {
		return model.User{}, "", &common.DeviceCodeInvalid{}
	}

	accessToken, err := s.jwtService.GenerateAccessToken(oneTimeAccessToken.User)
	if err != nil {
		return model.User{}, "", err
@@ -591,11 +653,22 @@ func (s *UserService) UpdateUserGroups(ctx context.Context, id string, userGroup
		return model.User{}, err
	}

	// Update the UpdatedAt field for all affected groups
	now := time.Now()
	for _, group := range groups {
		group.UpdatedAt = utils.Ptr(datatype.DateTime(now))
		err = tx.WithContext(ctx).Save(&group).Error
		if err != nil {
			return model.User{}, err
		}
	}

	err = tx.Commit().Error
	if err != nil {
		return model.User{}, err
	}

	s.scimService.ScheduleSync()
	return user, nil
}

@@ -672,44 +745,51 @@ func (s *UserService) checkDuplicatedFields(ctx context.Context, user model.User
}

// ResetProfilePicture deletes a user's custom profile picture
func (s *UserService) ResetProfilePicture(userID string) error {
func (s *UserService) ResetProfilePicture(ctx context.Context, userID string) error {
	// Validate the user ID to prevent directory traversal
	if err := uuid.Validate(userID); err != nil {
		return &common.InvalidUUIDError{}
	}

	// Build path to profile picture
	profilePicturePath := common.EnvConfig.UploadPath + "/profile-pictures/" + userID + ".png"

	// Check if file exists and delete it
	if _, err := os.Stat(profilePicturePath); err == nil {
		if err := os.Remove(profilePicturePath); err != nil {
			return fmt.Errorf("failed to delete profile picture: %w", err)
		}
	} else if !os.IsNotExist(err) {
		// If any error other than "file not exists"
		return fmt.Errorf("failed to check if profile picture exists: %w", err)
	profilePicturePath := path.Join("profile-pictures", userID+".png")
	if err := s.fileStorage.Delete(ctx, profilePicturePath); err != nil {
		return fmt.Errorf("failed to delete profile picture: %w", err)
	}
	// It's okay if the file doesn't exist - just means there's no custom picture to delete

	return nil
}

func (s *UserService) disableUserInternal(ctx context.Context, userID string, tx *gorm.DB) error {
	return tx.
func (s *UserService) disableUserInternal(ctx context.Context, tx *gorm.DB, userID string) error {
	err := tx.
		WithContext(ctx).
		Model(&model.User{}).
		Where("id = ?", userID).
		Update("disabled", true).
		Error

	if err != nil {
		return err
	}

	s.scimService.ScheduleSync()
	return nil
}

func (s *UserService) CreateSignupToken(ctx context.Context, ttl time.Duration, usageLimit int) (model.SignupToken, error) {
func (s *UserService) CreateSignupToken(ctx context.Context, ttl time.Duration, usageLimit int, userGroupIDs []string) (model.SignupToken, error) {
	signupToken, err := NewSignupToken(ttl, usageLimit)
	if err != nil {
		return model.SignupToken{}, err
	}

	var userGroups []model.UserGroup
	err = s.db.WithContext(ctx).
		Where("id IN ?", userGroupIDs).
		Find(&userGroups).
		Error
	if err != nil {
		return model.SignupToken{}, err
	}
	signupToken.UserGroups = userGroups

	err = s.db.WithContext(ctx).Create(signupToken).Error
	if err != nil {
		return model.SignupToken{}, err
@@ -732,10 +812,13 @@ func (s *UserService) SignUp(ctx context.Context, signupData dto.SignUpDto, ipAd
	}

	var signupToken model.SignupToken
	var userGroupIDs []string
	if tokenProvided {
		err := tx.
			WithContext(ctx).
			Preload("UserGroups").
			Where("token = ?", signupData.Token).
			Clauses(clause.Locking{Strength: "UPDATE"}).
			First(&signupToken).
			Error
		if err != nil {
@@ -748,14 +831,19 @@ func (s *UserService) SignUp(ctx context.Context, signupData dto.SignUpDto, ipAd
		if !signupToken.IsValid() {
			return model.User{}, "", &common.TokenInvalidOrExpiredError{}
		}

		for _, group := range signupToken.UserGroups {
			userGroupIDs = append(userGroupIDs, group.ID)
		}
	}

	userToCreate := dto.UserCreateDto{
		Username:    signupData.Username,
		Email:       signupData.Email,
		FirstName:   signupData.FirstName,
		LastName:    signupData.LastName,
		DisplayName: strings.TrimSpace(signupData.FirstName + " " + signupData.LastName),
		Username:     signupData.Username,
		Email:        signupData.Email,
		FirstName:    signupData.FirstName,
		LastName:     signupData.LastName,
		DisplayName:  strings.TrimSpace(signupData.FirstName + " " + signupData.LastName),
		UserGroupIds: userGroupIDs,
	}

	user, err := s.createUserInternal(ctx, userToCreate, false, tx)
@@ -794,11 +882,11 @@ func (s *UserService) SignUp(ctx context.Context, signupData dto.SignUpDto, ipAd
	return user, accessToken, nil
}

func (s *UserService) ListSignupTokens(ctx context.Context, sortedPaginationRequest utils.SortedPaginationRequest) ([]model.SignupToken, utils.PaginationResponse, error) {
func (s *UserService) ListSignupTokens(ctx context.Context, listRequestOptions utils.ListRequestOptions) ([]model.SignupToken, utils.PaginationResponse, error) {
	var tokens []model.SignupToken
	query := s.db.WithContext(ctx).Model(&model.SignupToken{})
	query := s.db.WithContext(ctx).Preload("UserGroups").Model(&model.SignupToken{})

	pagination, err := utils.PaginateAndSort(sortedPaginationRequest, query, &tokens)
	pagination, err := utils.PaginateFilterAndSort(listRequestOptions, query, &tokens)
	return tokens, pagination, err
}

@@ -806,23 +894,33 @@ func (s *UserService) DeleteSignupToken(ctx context.Context, tokenID string) err
	return s.db.WithContext(ctx).Delete(&model.SignupToken{}, "id = ?", tokenID).Error
}

func NewOneTimeAccessToken(userID string, ttl time.Duration) (*model.OneTimeAccessToken, error) {
func NewOneTimeAccessToken(userID string, ttl time.Duration, withDeviceToken bool) (*model.OneTimeAccessToken, error) {
	// If expires at is less than 15 minutes, use a 6-character token instead of 16
	tokenLength := 16
	if ttl <= 15*time.Minute {
		tokenLength = 6
	}

	randomString, err := utils.GenerateRandomAlphanumericString(tokenLength)
	token, err := utils.GenerateRandomUnambiguousString(tokenLength)
	if err != nil {
		return nil, err
	}

	var deviceToken *string
	if withDeviceToken {
		dt, err := utils.GenerateRandomAlphanumericString(16)
		if err != nil {
			return nil, err
		}
		deviceToken = &dt
	}

	now := time.Now().Round(time.Second)
	o := &model.OneTimeAccessToken{
		UserID:    userID,
		ExpiresAt: datatype.DateTime(now.Add(ttl)),
		Token:     randomString,
		UserID:      userID,
		ExpiresAt:   datatype.DateTime(now.Add(ttl)),
		Token:       token,
		DeviceToken: deviceToken,
	}

	return o, nil
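A note on the reworked one-time access token: with the extra withDeviceToken parameter, a device-bound secret can be minted alongside the short login code, and short TTLs (15 minutes or less) produce a 6-character unambiguous code. A minimal sketch of how this could be exercised from a test in the same package (the test itself is illustrative and not part of this diff; it assumes only the "testing" and "time" imports and the signature shown above):

	func TestNewOneTimeAccessTokenSketch(t *testing.T) {
		// A placeholder user ID; any string works for this constructor.
		ott, err := NewOneTimeAccessToken("00000000-0000-0000-0000-000000000000", 10*time.Minute, true)
		if err != nil {
			t.Fatal(err)
		}
		// TTLs of 15 minutes or less produce a 6-character code.
		if len(ott.Token) != 6 {
			t.Fatalf("expected a 6-character token, got %d characters", len(ott.Token))
		}
		// The device token is only generated when withDeviceToken is true.
		if ott.DeviceToken == nil {
			t.Fatal("expected a device token to be generated")
		}
	}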
@@ -58,7 +58,7 @@ func (s *VersionService) GetLatestVersion(ctx context.Context) (string, error) {
	}

	if payload.TagName == "" {
		return "", fmt.Errorf("GitHub API returned empty tag name")
		return "", errors.New("GitHub API returned empty tag name")
	}

	return strings.TrimPrefix(payload.TagName, "v"), nil
@@ -2,6 +2,8 @@ package service

import (
	"context"
	"encoding/hex"
	"errors"
	"fmt"
	"net/http"
	"time"
@@ -81,6 +83,7 @@ func (s *WebAuthnService) BeginRegistration(ctx context.Context, userID string)
		&user,
		webauthn.WithResidentKeyRequirement(protocol.ResidentKeyRequirementRequired),
		webauthn.WithExclusions(user.WebAuthnCredentialDescriptors()),
		webauthn.WithExtensions(map[string]any{"credProps": true}), // Required for Firefox Android to properly save the key in Google password manager
	)
	if err != nil {
		return nil, fmt.Errorf("failed to begin WebAuthn registration: %w", err)
@@ -89,6 +92,7 @@ func (s *WebAuthnService) BeginRegistration(ctx context.Context, userID string)
	sessionToStore := &model.WebauthnSession{
		ExpiresAt:        datatype.DateTime(session.Expires),
		Challenge:        session.Challenge,
		CredentialParams: session.CredParams,
		UserVerification: string(session.UserVerification),
	}

@@ -112,7 +116,7 @@ func (s *WebAuthnService) BeginRegistration(ctx context.Context, userID string)
	}, nil
}

func (s *WebAuthnService) VerifyRegistration(ctx context.Context, sessionID, userID string, r *http.Request) (model.WebauthnCredential, error) {
func (s *WebAuthnService) VerifyRegistration(ctx context.Context, sessionID string, userID string, r *http.Request, ipAddress string) (model.WebauthnCredential, error) {
	tx := s.db.Begin()
	defer func() {
		tx.Rollback()
@@ -130,9 +134,10 @@ func (s *WebAuthnService) VerifyRegistration(ctx context.Context, sessionID, use
	}

	session := webauthn.SessionData{
		Challenge: storedSession.Challenge,
		Expires:   storedSession.ExpiresAt.ToTime(),
		UserID:    []byte(userID),
		Challenge:  storedSession.Challenge,
		Expires:    storedSession.ExpiresAt.ToTime(),
		CredParams: storedSession.CredentialParams,
		UserID:     []byte(userID),
	}

	var user model.User
@@ -170,6 +175,9 @@ func (s *WebAuthnService) VerifyRegistration(ctx context.Context, sessionID, use
		return model.WebauthnCredential{}, fmt.Errorf("failed to store WebAuthn credential: %w", err)
	}

	auditLogData := model.AuditLogData{"credentialID": hex.EncodeToString(credential.ID), "passkeyName": passkeyName}
	s.auditLogService.Create(ctx, model.AuditLogEventPasskeyAdded, ipAddress, r.UserAgent(), userID, auditLogData, tx)

	err = tx.Commit().Error
	if err != nil {
		return model.WebauthnCredential{}, fmt.Errorf("failed to commit transaction: %w", err)
@@ -285,16 +293,30 @@ func (s *WebAuthnService) ListCredentials(ctx context.Context, userID string) ([
	return credentials, nil
}

func (s *WebAuthnService) DeleteCredential(ctx context.Context, userID, credentialID string) error {
	err := s.db.
func (s *WebAuthnService) DeleteCredential(ctx context.Context, userID string, credentialID string, ipAddress string, userAgent string) error {
	tx := s.db.Begin()
	defer func() {
		tx.Rollback()
	}()

	credential := &model.WebauthnCredential{}
	err := tx.
		WithContext(ctx).
		Where("id = ? AND user_id = ?", credentialID, userID).
		Delete(&model.WebauthnCredential{}).
		Clauses(clause.Returning{}).
		Delete(credential, "id = ? AND user_id = ?", credentialID, userID).
		Error
	if err != nil {
		return fmt.Errorf("failed to delete record: %w", err)
	}

	auditLogData := model.AuditLogData{"credentialID": hex.EncodeToString(credential.CredentialID), "passkeyName": credential.Name}
	s.auditLogService.Create(ctx, model.AuditLogEventPasskeyRemoved, ipAddress, userAgent, userID, auditLogData, tx)

	err = tx.Commit().Error
	if err != nil {
		return fmt.Errorf("failed to commit transaction: %w", err)
	}

	return nil
}

@@ -350,7 +372,7 @@ func (s *WebAuthnService) CreateReauthenticationTokenWithAccessToken(ctx context

	userID, ok := token.Subject()
	if !ok {
		return "", fmt.Errorf("access token does not contain user ID")
		return "", errors.New("access token does not contain user ID")
	}

	// Check if token is issued less than a minute ago
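The reworked DeleteCredential leans on GORM's RETURNING support: adding clause.Returning{} writes the deleted row back into the destination struct, so its credential ID and name are still available for the audit log after the delete. A stripped-down sketch of the same pattern, with variable names that are illustrative rather than taken from the diff:

	var cred model.WebauthnCredential
	err := tx.
		WithContext(ctx).
		Clauses(clause.Returning{}).
		Delete(&cred, "id = ? AND user_id = ?", credentialID, userID).
		Error
	// On databases that support RETURNING (e.g. Postgres, recent SQLite), cred now
	// holds the deleted row, so fields like cred.CredentialID and cred.Name can be
	// logged even though the record is gone.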
backend/internal/storage/database.go (new file)
@@ -0,0 +1,226 @@
package storage

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/pocket-id/pocket-id/backend/internal/model"
	datatype "github.com/pocket-id/pocket-id/backend/internal/model/types"
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
)

var TypeDatabase = "database"

type databaseStorage struct {
	db *gorm.DB
}

// NewDatabaseStorage creates a new database storage provider
func NewDatabaseStorage(db *gorm.DB) (FileStorage, error) {
	if db == nil {
		return nil, errors.New("database connection is required")
	}
	return &databaseStorage{db: db}, nil
}

func (s *databaseStorage) Type() string {
	return TypeDatabase
}

func (s *databaseStorage) Save(ctx context.Context, relativePath string, data io.Reader) error {
	// Normalize the path
	relativePath = filepath.ToSlash(filepath.Clean(relativePath))

	// Read all data into memory
	b, err := io.ReadAll(data)
	if err != nil {
		return fmt.Errorf("failed to read data: %w", err)
	}

	now := datatype.DateTime(time.Now())
	storage := model.Storage{
		Path:      relativePath,
		Data:      b,
		Size:      int64(len(b)),
		ModTime:   now,
		CreatedAt: now,
	}

	// Use upsert: insert or update on conflict
	result := s.db.
		WithContext(ctx).
		Clauses(clause.OnConflict{
			Columns:   []clause.Column{{Name: "path"}},
			DoUpdates: clause.AssignmentColumns([]string{"data", "size", "mod_time"}),
		}).
		Create(&storage)

	if result.Error != nil {
		return fmt.Errorf("failed to save file to database: %w", result.Error)
	}

	return nil
}

func (s *databaseStorage) Open(ctx context.Context, relativePath string) (io.ReadCloser, int64, error) {
	relativePath = filepath.ToSlash(filepath.Clean(relativePath))

	var storage model.Storage
	result := s.db.
		WithContext(ctx).
		Where("path = ?", relativePath).
		First(&storage)

	if result.Error != nil {
		if errors.Is(result.Error, gorm.ErrRecordNotFound) {
			return nil, 0, os.ErrNotExist
		}
		return nil, 0, fmt.Errorf("failed to read file from database: %w", result.Error)
	}

	reader := io.NopCloser(bytes.NewReader(storage.Data))
	return reader, storage.Size, nil
}

func (s *databaseStorage) Delete(ctx context.Context, relativePath string) error {
	relativePath = filepath.ToSlash(filepath.Clean(relativePath))

	result := s.db.
		WithContext(ctx).
		Where("path = ?", relativePath).
		Delete(&model.Storage{})
	if result.Error != nil {
		return fmt.Errorf("failed to delete file from database: %w", result.Error)
	}

	return nil
}

func (s *databaseStorage) DeleteAll(ctx context.Context, prefix string) error {
	prefix = filepath.ToSlash(filepath.Clean(prefix))

	// If empty prefix, delete all
	if isRootPath(prefix) {
		result := s.db.
			WithContext(ctx).
			Where("1 = 1"). // Delete everything
			Delete(&model.Storage{})
		if result.Error != nil {
			return fmt.Errorf("failed to delete all files from database: %w", result.Error)
		}
		return nil
	}

	// Ensure prefix ends with / for proper prefix matching
	if !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	query := s.db.WithContext(ctx)
	query = addPathPrefixClause(s.db.Name(), query, prefix)
	result := query.Delete(&model.Storage{})
	if result.Error != nil {
		return fmt.Errorf("failed to delete files with prefix '%s' from database: %w", prefix, result.Error)
	}

	return nil
}

func (s *databaseStorage) List(ctx context.Context, prefix string) ([]ObjectInfo, error) {
	prefix = filepath.ToSlash(filepath.Clean(prefix))

	var storageItems []model.Storage
	query := s.db.WithContext(ctx)

	if !isRootPath(prefix) {
		// Ensure prefix matching
		if !strings.HasSuffix(prefix, "/") {
			prefix += "/"
		}
		query = addPathPrefixClause(s.db.Name(), query, prefix)
	}

	result := query.
		Select("path", "size", "mod_time").
		Find(&storageItems)
	if result.Error != nil {
		return nil, fmt.Errorf("failed to list files from database: %w", result.Error)
	}

	objects := make([]ObjectInfo, 0, len(storageItems))
	for _, item := range storageItems {
		// Filter out directory-like paths (those that contain additional slashes after the prefix)
		relativePath := strings.TrimPrefix(item.Path, prefix)
		if strings.ContainsRune(relativePath, '/') {
			continue
		}

		objects = append(objects, ObjectInfo{
			Path:    item.Path,
			Size:    item.Size,
			ModTime: time.Time(item.ModTime),
		})
	}

	return objects, nil
}

func (s *databaseStorage) Walk(ctx context.Context, root string, fn func(ObjectInfo) error) error {
	root = filepath.ToSlash(filepath.Clean(root))

	var storageItems []model.Storage
	query := s.db.WithContext(ctx)

	if !isRootPath(root) {
		// Ensure root matching
		if !strings.HasSuffix(root, "/") {
			root += "/"
		}
		query = addPathPrefixClause(s.db.Name(), query, root)
	}

	result := query.
		Select("path", "size", "mod_time").
		Find(&storageItems)
	if result.Error != nil {
		return fmt.Errorf("failed to walk files from database: %w", result.Error)
	}

	for _, item := range storageItems {
		err := fn(ObjectInfo{
			Path:    item.Path,
			Size:    item.Size,
			ModTime: time.Time(item.ModTime),
		})
		if err != nil {
			return err
		}
	}

	return nil
}

func isRootPath(path string) bool {
	return path == "" || path == "/" || path == "."
}

func addPathPrefixClause(dialect string, query *gorm.DB, prefix string) *gorm.DB {
	// In SQLite, we use "GLOB" which can use the index
	switch dialect {
	case "sqlite":
		return query.Where("path GLOB ?", prefix+"*")
	case "postgres":
		return query.Where("path LIKE ?", prefix+"%")
	default:
		// Indicates a development-time error
		panic(fmt.Errorf("unsupported database dialect: %s", dialect))
	}
}
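Taken together, the database-backed provider exposes the same Save/Open/Delete/List/Walk surface as the other storage backends, so callers such as the ResetProfilePicture change above never need to know where the bytes actually live. A hypothetical wiring sketch of that usage (db, ctx, userID and pngBytes are assumed to exist in the caller and are not defined in this diff):

	store, err := NewDatabaseStorage(db)
	if err != nil {
		return err
	}
	// Store and re-read a profile picture through the generic interface.
	if err := store.Save(ctx, "profile-pictures/"+userID+".png", bytes.NewReader(pngBytes)); err != nil {
		return err
	}
	reader, _, err := store.Open(ctx, "profile-pictures/"+userID+".png")
	if err != nil {
		return err
	}
	defer reader.Close()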
backend/internal/storage/database_test.go (new file)
@@ -0,0 +1,148 @@
package storage

import (
	"bytes"
	"context"
	"io"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	testingutil "github.com/pocket-id/pocket-id/backend/internal/utils/testing"
)

func TestDatabaseStorageOperations(t *testing.T) {
	ctx := context.Background()
	db := testingutil.NewDatabaseForTest(t)
	store, err := NewDatabaseStorage(db)
	require.NoError(t, err)

	t.Run("type should be database", func(t *testing.T) {
		assert.Equal(t, TypeDatabase, store.Type())
	})

	t.Run("save, open and list files", func(t *testing.T) {
		err := store.Save(ctx, "images/logo.png", bytes.NewBufferString("logo-data"))
		require.NoError(t, err)

		reader, size, err := store.Open(ctx, "images/logo.png")
		require.NoError(t, err)
		defer reader.Close()

		contents, err := io.ReadAll(reader)
		require.NoError(t, err)
		assert.Equal(t, []byte("logo-data"), contents)
		assert.Equal(t, int64(len(contents)), size)

		err = store.Save(ctx, "images/nested/child.txt", bytes.NewBufferString("child"))
		require.NoError(t, err)

		files, err := store.List(ctx, "images")
		require.NoError(t, err)
		require.Len(t, files, 1)
		assert.Equal(t, "images/logo.png", files[0].Path)
		assert.Equal(t, int64(len("logo-data")), files[0].Size)
	})

	t.Run("save should update existing file", func(t *testing.T) {
		err := store.Save(ctx, "test/update.txt", bytes.NewBufferString("original"))
		require.NoError(t, err)

		err = store.Save(ctx, "test/update.txt", bytes.NewBufferString("updated"))
		require.NoError(t, err)

		reader, size, err := store.Open(ctx, "test/update.txt")
		require.NoError(t, err)
		defer reader.Close()

		contents, err := io.ReadAll(reader)
		require.NoError(t, err)
		assert.Equal(t, []byte("updated"), contents)
		assert.Equal(t, int64(len("updated")), size)
	})

	t.Run("delete files individually", func(t *testing.T) {
		err := store.Save(ctx, "images/delete-me.txt", bytes.NewBufferString("temp"))
		require.NoError(t, err)

		require.NoError(t, store.Delete(ctx, "images/delete-me.txt"))
		_, _, err = store.Open(ctx, "images/delete-me.txt")
		require.Error(t, err)
		assert.True(t, IsNotExist(err))
	})

	t.Run("delete missing file should not error", func(t *testing.T) {
		require.NoError(t, store.Delete(ctx, "images/missing.txt"))
	})

	t.Run("delete all files", func(t *testing.T) {
		require.NoError(t, store.Save(ctx, "cleanup/a.txt", bytes.NewBufferString("a")))
		require.NoError(t, store.Save(ctx, "cleanup/b.txt", bytes.NewBufferString("b")))
		require.NoError(t, store.Save(ctx, "cleanup/nested/c.txt", bytes.NewBufferString("c")))
		require.NoError(t, store.DeleteAll(ctx, "/"))

		_, _, err := store.Open(ctx, "cleanup/a.txt")
		require.Error(t, err)
		assert.True(t, IsNotExist(err))

		_, _, err = store.Open(ctx, "cleanup/b.txt")
		require.Error(t, err)
		assert.True(t, IsNotExist(err))

		_, _, err = store.Open(ctx, "cleanup/nested/c.txt")
		require.Error(t, err)
		assert.True(t, IsNotExist(err))
	})

	t.Run("delete all files under a prefix", func(t *testing.T) {
		require.NoError(t, store.Save(ctx, "cleanup/a.txt", bytes.NewBufferString("a")))
		require.NoError(t, store.Save(ctx, "cleanup/b.txt", bytes.NewBufferString("b")))
		require.NoError(t, store.Save(ctx, "cleanup/nested/c.txt", bytes.NewBufferString("c")))
		require.NoError(t, store.DeleteAll(ctx, "cleanup"))

		_, _, err := store.Open(ctx, "cleanup/a.txt")
		require.Error(t, err)
		assert.True(t, IsNotExist(err))

		_, _, err = store.Open(ctx, "cleanup/b.txt")
		require.Error(t, err)
		assert.True(t, IsNotExist(err))

		_, _, err = store.Open(ctx, "cleanup/nested/c.txt")
		require.Error(t, err)
		assert.True(t, IsNotExist(err))
	})

	t.Run("walk files", func(t *testing.T) {
		require.NoError(t, store.Save(ctx, "walk/file1.txt", bytes.NewBufferString("1")))
		require.NoError(t, store.Save(ctx, "walk/file2.txt", bytes.NewBufferString("2")))
		require.NoError(t, store.Save(ctx, "walk/nested/file3.txt", bytes.NewBufferString("3")))

		var paths []string
		err := store.Walk(ctx, "walk", func(info ObjectInfo) error {
			paths = append(paths, info.Path)
			return nil
		})
		require.NoError(t, err)
		assert.Len(t, paths, 3)
		assert.Contains(t, paths, "walk/file1.txt")
		assert.Contains(t, paths, "walk/file2.txt")
		assert.Contains(t, paths, "walk/nested/file3.txt")
	})
}

func TestNewDatabaseStorage(t *testing.T) {
	t.Run("should return error with nil database", func(t *testing.T) {
		_, err := NewDatabaseStorage(nil)
		require.Error(t, err)
		assert.Contains(t, err.Error(), "database connection is required")
	})

	t.Run("should create storage with valid database", func(t *testing.T) {
		db := testingutil.NewDatabaseForTest(t)
		store, err := NewDatabaseStorage(db)
		require.NoError(t, err)
		assert.NotNil(t, store)
	})
}
backend/internal/storage/filesystem.go (new file)
@@ -0,0 +1,193 @@
package storage

import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"os"
	"path/filepath"
	"strings"

	"github.com/google/uuid"
)

type filesystemStorage struct {
	root             *os.Root
	absoluteRootPath string
}

func NewFilesystemStorage(rootPath string) (FileStorage, error) {
	if err := os.MkdirAll(rootPath, 0700); err != nil {
		return nil, fmt.Errorf("failed to create root directory '%s': %w", rootPath, err)
	}
	root, err := os.OpenRoot(rootPath)
	if err != nil {
		return nil, fmt.Errorf("failed to open root directory '%s': %w", rootPath, err)
	}

	absoluteRootPath, err := filepath.Abs(rootPath)
	if err != nil {
		return nil, fmt.Errorf("failed to get absolute path of root directory '%s': %w", rootPath, err)
	}

	return &filesystemStorage{root: root, absoluteRootPath: absoluteRootPath}, err
}

func (s *filesystemStorage) Type() string {
	return TypeFileSystem
}

func (s *filesystemStorage) Save(_ context.Context, path string, data io.Reader) error {
	path = filepath.FromSlash(path)

	if err := s.root.MkdirAll(filepath.Dir(path), 0700); err != nil {
		return fmt.Errorf("failed to create directories for path '%s': %w", path, err)
	}

	// Our strategy is to save to a separate file and then rename it to override the original file
	tmpName := path + "." + uuid.NewString() + "-tmp"

	// Write to the temporary file
	tmpFile, err := s.root.Create(tmpName)
	if err != nil {
		return fmt.Errorf("failed to open file '%s' for writing: %w", tmpName, err)
	}

	_, err = io.Copy(tmpFile, data)
	if err != nil {
		tmpFile.Close()
		_ = s.root.Remove(tmpName)
		return fmt.Errorf("failed to write temporary file: %w", err)
	}

	if err = tmpFile.Close(); err != nil {
		_ = s.root.Remove(tmpName)
		return fmt.Errorf("failed to close temporary file: %w", err)
	}

	// Rename to the final file, which overrides existing files
	// This is an atomic operation
	if err = s.root.Rename(tmpName, path); err != nil {
		_ = s.root.Remove(tmpName)
		return fmt.Errorf("failed to move temporary file: %w", err)
	}

	return nil
}

func (s *filesystemStorage) Open(_ context.Context, path string) (io.ReadCloser, int64, error) {
	path = filepath.FromSlash(path)

	file, err := s.root.Open(path)
	if err != nil {
		return nil, 0, err
	}
	info, err := file.Stat()
	if err != nil {
		file.Close()
		return nil, 0, err
	}
	return file, info.Size(), nil
}

func (s *filesystemStorage) Delete(_ context.Context, path string) error {
	path = filepath.FromSlash(path)

	err := s.root.Remove(path)
	if err != nil && !errors.Is(err, fs.ErrNotExist) {
		return err
	}
	return nil
}

func (s *filesystemStorage) DeleteAll(_ context.Context, path string) error {
	path = filepath.FromSlash(path)

	// If "/", "." or "" is requested, we delete all contents of the root.
	if path == "" || path == "/" || path == "." {
		dir, err := s.root.Open(".")
		if err != nil {
			return fmt.Errorf("failed to open root directory: %w", err)
		}
		defer dir.Close()

		entries, err := dir.ReadDir(-1)
		if err != nil {
			return fmt.Errorf("failed to list root directory: %w", err)
		}
		for _, entry := range entries {
			if err := s.root.RemoveAll(entry.Name()); err != nil {
				return fmt.Errorf("failed to delete '%s': %w", entry.Name(), err)
			}
		}
		return nil
	}

	return s.root.RemoveAll(path)
}
func (s *filesystemStorage) List(_ context.Context, path string) ([]ObjectInfo, error) {
	path = filepath.FromSlash(path)

	dir, err := s.root.Open(path)
	if err != nil {
		return nil, err
	}
	defer dir.Close()

	entries, err := dir.ReadDir(-1)
	if err != nil {
		return nil, err
	}

	objects := make([]ObjectInfo, 0, len(entries))
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		info, err := entry.Info()
		if err != nil {
			return nil, err
		}
		objects = append(objects, ObjectInfo{
			Path:    filepath.Join(path, entry.Name()),
			Size:    info.Size(),
			ModTime: info.ModTime(),
		})
	}
	return objects, nil
}
func (s *filesystemStorage) Walk(_ context.Context, root string, fn func(ObjectInfo) error) error {
	root = filepath.FromSlash(root)

	fullPath := filepath.Clean(filepath.Join(s.absoluteRootPath, root))

	// As we can't use os.Root here, we manually ensure that the fullPath is within the root directory
	sep := string(filepath.Separator)
	if !strings.HasPrefix(fullPath+sep, s.absoluteRootPath+sep) {
		return fmt.Errorf("invalid root path: %s", root)
	}

	return filepath.WalkDir(fullPath, func(full string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		rel, err := filepath.Rel(s.absoluteRootPath, full)
		if err != nil {
			return err
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		return fn(ObjectInfo{
			Path:    filepath.ToSlash(rel),
			Size:    info.Size(),
			ModTime: info.ModTime(),
		})
	})
}
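The filesystem provider's Save writes to a uniquely named temporary file inside the same os.Root and then renames it over the target, so a concurrent reader never observes a half-written file. The same idea expressed with plain os calls, as a standalone sketch (the function name and error handling are illustrative, not taken from the diff; assumed imports: os, strconv, time):

	func atomicWrite(path string, data []byte) error {
		// Write the payload to a unique sibling file first...
		tmp := path + ".tmp-" + strconv.FormatInt(time.Now().UnixNano(), 10)
		if err := os.WriteFile(tmp, data, 0o600); err != nil {
			return err
		}
		// ...then rename it over the destination; on POSIX filesystems the rename
		// replaces the target atomically.
		if err := os.Rename(tmp, path); err != nil {
			os.Remove(tmp)
			return err
		}
		return nil
	}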
backend/internal/storage/filesystem_test.go (new file)
@@ -0,0 +1,68 @@
package storage

import (
	"bytes"
	"context"
	"io"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestFilesystemStorageOperations(t *testing.T) {
	ctx := context.Background()
	store, err := NewFilesystemStorage(t.TempDir())
	require.NoError(t, err)

	t.Run("save, open and list files", func(t *testing.T) {
		err := store.Save(ctx, "images/logo.png", bytes.NewBufferString("logo-data"))
		require.NoError(t, err)

		reader, size, err := store.Open(ctx, "images/logo.png")
		require.NoError(t, err)
		defer reader.Close()

		contents, err := io.ReadAll(reader)
		require.NoError(t, err)
		assert.Equal(t, []byte("logo-data"), contents)
		assert.Equal(t, int64(len(contents)), size)

		err = store.Save(ctx, "images/nested/child.txt", bytes.NewBufferString("child"))
		require.NoError(t, err)

		files, err := store.List(ctx, "images")
		require.NoError(t, err)
		require.Len(t, files, 1)
		assert.Equal(t, filepath.Join("images", "logo.png"), files[0].Path)
		assert.Equal(t, int64(len("logo-data")), files[0].Size)
	})

	t.Run("delete files individually and idempotently", func(t *testing.T) {
		err := store.Save(ctx, "images/delete-me.txt", bytes.NewBufferString("temp"))
		require.NoError(t, err)

		require.NoError(t, store.Delete(ctx, "images/delete-me.txt"))
		_, _, err = store.Open(ctx, "images/delete-me.txt")
		require.Error(t, err)
		assert.True(t, IsNotExist(err))

		// Deleting a missing object should be a no-op.
		require.NoError(t, store.Delete(ctx, "images/missing.txt"))
	})

	t.Run("delete all files under a prefix", func(t *testing.T) {
		require.NoError(t, store.Save(ctx, "images/a.txt", bytes.NewBufferString("a")))
		require.NoError(t, store.Save(ctx, "images/b.txt", bytes.NewBufferString("b")))
		require.NoError(t, store.DeleteAll(ctx, "images"))

		_, _, err := store.Open(ctx, "images/a.txt")
		require.Error(t, err)
		assert.True(t, IsNotExist(err))

		_, _, err = store.Open(ctx, "images/b.txt")
		require.Error(t, err)
		assert.True(t, IsNotExist(err))
	})
}
backend/internal/storage/s3.go (new file)
@@ -0,0 +1,190 @@
package storage

import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"path/filepath"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	awscfg "github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/credentials"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	s3types "github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/aws/smithy-go"
)

type S3Config struct {
	Bucket                        string
	Region                        string
	Endpoint                      string
	AccessKeyID                   string
	SecretAccessKey               string
	ForcePathStyle                bool
	DisableDefaultIntegrityChecks bool
	Root                          string
}

type s3Storage struct {
	client *s3.Client
	bucket string
	prefix string
}

func NewS3Storage(ctx context.Context, cfg S3Config) (FileStorage, error) {
	creds := credentials.NewStaticCredentialsProvider(cfg.AccessKeyID, cfg.SecretAccessKey, "")
	awsCfg, err := awscfg.LoadDefaultConfig(ctx, awscfg.WithRegion(cfg.Region), awscfg.WithCredentialsProvider(creds))
	if err != nil {
		return nil, fmt.Errorf("failed to load AWS configuration: %w", err)
	}
	client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
		if cfg.Endpoint != "" {
			o.BaseEndpoint = aws.String(cfg.Endpoint)
		}
		o.UsePathStyle = cfg.ForcePathStyle
		if cfg.DisableDefaultIntegrityChecks {
			o.RequestChecksumCalculation = aws.RequestChecksumCalculationWhenRequired
			o.ResponseChecksumValidation = aws.ResponseChecksumValidationWhenRequired
		}
	})

	return &s3Storage{
		client: client,
		bucket: cfg.Bucket,
		prefix: strings.Trim(cfg.Root, "/"),
	}, nil
}

func (s *s3Storage) Type() string {
	return TypeS3
}

func (s *s3Storage) Save(ctx context.Context, path string, data io.Reader) error {
	_, err := s.client.PutObject(ctx, &s3.PutObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(s.buildObjectKey(path)),
		Body:   data,
	})
	return err
}

func (s *s3Storage) Open(ctx context.Context, path string) (io.ReadCloser, int64, error) {
	resp, err := s.client.GetObject(ctx, &s3.GetObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(s.buildObjectKey(path)),
	})
	if err != nil {
		if isS3NotFound(err) {
			return nil, 0, fs.ErrNotExist
		}
		return nil, 0, err
	}
	return resp.Body, aws.ToInt64(resp.ContentLength), nil
}

func (s *s3Storage) Delete(ctx context.Context, path string) error {
	_, err := s.client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket: aws.String(s.bucket),
		Key:    aws.String(s.buildObjectKey(path)),
	})
	return err
}

func (s *s3Storage) DeleteAll(ctx context.Context, path string) error {

	paginator := s3.NewListObjectsV2Paginator(s.client, &s3.ListObjectsV2Input{
		Bucket: aws.String(s.bucket),
		Prefix: aws.String(s.buildObjectKey(path)),
	})
	for paginator.HasMorePages() {
		page, err := paginator.NextPage(ctx)
		if err != nil {
			return err
		}
		if len(page.Contents) == 0 {
			continue
		}
		objects := make([]s3types.ObjectIdentifier, 0, len(page.Contents))
		for _, obj := range page.Contents {
			objects = append(objects, s3types.ObjectIdentifier{Key: obj.Key})
		}
		_, err = s.client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
			Bucket: aws.String(s.bucket),
			Delete: &s3types.Delete{Objects: objects, Quiet: aws.Bool(true)},
		})
		if err != nil {
			return err
		}
	}
	return nil
}

func (s *s3Storage) List(ctx context.Context, path string) ([]ObjectInfo, error) {
	paginator := s3.NewListObjectsV2Paginator(s.client, &s3.ListObjectsV2Input{
		Bucket: aws.String(s.bucket),
		Prefix: aws.String(s.buildObjectKey(path)),
	})
	var objects []ObjectInfo
	for paginator.HasMorePages() {
		page, err := paginator.NextPage(ctx)
		if err != nil {
			return nil, err
		}
		for _, obj := range page.Contents {
			if obj.Key == nil {
				continue
			}
			objects = append(objects, ObjectInfo{
				Path:    aws.ToString(obj.Key),
				Size:    aws.ToInt64(obj.Size),
				ModTime: aws.ToTime(obj.LastModified),
			})
		}
	}
	return objects, nil
}

func (s *s3Storage) Walk(ctx context.Context, root string, fn func(ObjectInfo) error) error {
	objects, err := s.List(ctx, root)
	if err != nil {
		return err
	}

	for _, obj := range objects {
		if err := fn(obj); err != nil {
			return err
		}
	}

	return nil
}

func (s *s3Storage) buildObjectKey(p string) string {
	p = filepath.Clean(p)
	p = filepath.ToSlash(p)
	p = strings.Trim(p, "/")

	if p == "" || p == "." {
		return s.prefix
	}

	if s.prefix == "" {
		return p
	}

	return s.prefix + "/" + p
}

func isS3NotFound(err error) bool {
	var apiErr smithy.APIError
	if errors.As(err, &apiErr) {
		if apiErr.ErrorCode() == "NotFound" || apiErr.ErrorCode() == "NoSuchKey" {
			return true
		}
	}
	var missingKey *s3types.NoSuchKey
	return errors.As(err, &missingKey)
}
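For S3-compatible servers (MinIO is a common example, though it is not named in this diff), the provider typically needs a custom endpoint plus path-style addressing, and DisableDefaultIntegrityChecks switches the SDK to its when-required checksum modes for servers that do not accept the default integrity checksums. A hypothetical configuration sketch with placeholder values (accessKey and secretKey are assumed to come from configuration):

	store, err := NewS3Storage(ctx, S3Config{
		Bucket:          "pocket-id",
		Region:          "us-east-1",
		Endpoint:        "http://localhost:9000", // placeholder endpoint for an S3-compatible server
		AccessKeyID:     accessKey,
		SecretAccessKey: secretKey,
		ForcePathStyle:  true,
		Root:            "uploads",
	})
	if err != nil {
		return err
	}
	// store is then used through the same FileStorage methods as the other providers.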
Some files were not shown because too many files have changed in this diff.