Compare commits


1 Commit
main ... v0.2.2

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Dario Tranchitella | 681328b1bc | chore(go): bumping up github.com/emicklei/go-restful to v2.16.0 | 2023-02-17 14:18:57 +01:00 |
767 changed files with 65479 additions and 63152 deletions


@@ -9,7 +9,7 @@ assignees: ''
<!--
Thanks for taking the time to report a Capsule bug!
-->
# Bug description


@@ -32,4 +32,4 @@ How would the new interaction with Capsule look like? E.g.
Feel free to add a diagram if that helps explain things.
# Expected behavior
A clear and concise description of what you expect to happen.


@@ -1,7 +1,20 @@
<!--
Read the contribution guidelines before creating a pull request.
https://github.com/projectcapsule/capsule/blob/main/CONTRIBUTING.md
# General contribution criteria
Thanks for spending some time improving and fixing Capsule!
We're still working on the outline of the contribution guidelines, but we
follow these points ourselves:
- reference a previously opened issue: https://docs.github.com/en/github/writing-on-github/autolinked-references-and-urls#issues-and-pull-requests
- include a sentence or two in the commit description for the
changelog/release notes
- split changes into several small, documented commits
- limit the git subject to 50 characters and write as the continuation of the
sentence "If applied, this commit will ..."
- explain what and why in the body, if more than a trivial change, wrapping at
72 characters
If you have any issue or question, reach out to us!
https://clastix.slack.com >>> #capsule channel
-->


@@ -1,21 +0,0 @@
name: Checks if an input is defined
description: Checks if an input is defined and outputs 'true' or 'false'.
inputs:
value:
description: value to test
required: true
outputs:
result:
description: outputs 'true' or 'false' if input value is defined or not
value: ${{ steps.check.outputs.result }}
runs:
using: composite
steps:
- shell: bash
id: check
run: |
echo "result=${{ inputs.value != '' }}" >> $GITHUB_OUTPUT


@@ -1,20 +0,0 @@
name: Setup caches
description: Setup caches for go modules and build cache.
inputs:
build-cache-key:
description: build cache prefix
runs:
using: composite
steps:
- uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-pkg-mod-${{ hashFiles('**/go.sum') }}-${{ hashFiles('Makefile') }}
- uses: actions/cache@cdf6c1fa76f9f475f3d7449005a359c84ca0f306 # v5.0.3
if: ${{ inputs.build-cache-key }}
with:
path: ~/.cache/go-build
key: ${{ runner.os }}-build-cache-${{ inputs.build-cache-key }}-${{ hashFiles('**/go.sum') }}-${{ hashFiles('Makefile') }}
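For reference, a minimal sketch of a job step consuming this action, mirroring how the "Publish images" workflow later in this compare calls it (the `build-cache-key` value is workflow-specific):

```yaml
- name: Setup caches
  uses: ./.github/actions/setup-caches
  timeout-minutes: 5
  continue-on-error: true
  with:
    build-cache-key: publish-images
```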


@@ -1,10 +1,8 @@
remote: origin
target-branch: main
target-branch: master
chart-dirs:
- charts
chart-repos:
- capsule=https://projectcapsule.github.io/charts/
helm-extra-args: "--timeout 600s"
validate-chart-schema: false
validate-maintainers: false
validate-yaml: true


@@ -1,18 +1,6 @@
---
ignore:
- config/
- charts/*/templates/
- charts/**/templates/
rules:
truthy:
level: warning
allowed-values:
- "true"
- "false"
- "on"
- "off"
check-keys: false
---
rules:
braces:
min-spaces-inside: 0
max-spaces-inside: 0
@@ -51,3 +39,5 @@ rules:
new-lines:
type: unix
trailing-spaces: enable
truthy:
level: warning


@@ -1,37 +1,23 @@
maintainers:
- name: Adriano Pezzuto
github: https://github.com/bsctl
company: Clastix
projects:
- https://github.com/projectcapsule/capsule
- https://github.com/clastix/capsule
- https://github.com/clastix/capsule-proxy
- name: Dario Tranchitella
github: https://github.com/prometherion
company: Clastix
projects:
- https://github.com/projectcapsule/capsule
- https://github.com/clastix/capsule
- https://github.com/clastix/capsule-proxy
- name: Maksim Fedotov
github: https://github.com/MaxFedotov
company: wargaming.net
projects:
- https://github.com/projectcapsule/capsule
- https://github.com/clastix/capsule
- https://github.com/clastix/capsule-proxy
- name: Oliver Bähler
github: https://github.com/oliverbaehler
company: Peak Scale
company: Bedag Informatik AG
projects:
- https://github.com/projectcapsule/capsule
- https://github.com/projectcapsule/capsule-proxy
- name: Massimiliano Giovagnoli
github: https://github.com/maxgio92
company: Proximus
projects:
- https://github.com/projectcapsule/capsule
- https://github.com/projectcapsule/capsule-proxy
- name: Hristo Hristov
github: https://github.com/Svarrogh1337
company: Vaerolabs
projects:
- https://github.com/projectcapsule/capsule
- https://github.com/projectcapsule/capsule-proxy
- https://github.com/clastix/capsule


@@ -1,25 +0,0 @@
name: Check actions
permissions: {}
on:
pull_request:
branches:
- "*"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Ensure SHA pinned actions
uses: zgosalvez/github-actions-ensure-sha-pinned-actions@d5d20e15f2736816ee0e001ba8b24b54d9ffcff4 # v5.0.0
with:
# slsa-github-generator requires using a semver tag for reusable workflows.
# See: https://github.com/slsa-framework/slsa-github-generator#referencing-slsa-builders-and-generators
allowlist: |
slsa-framework/slsa-github-generator


@@ -1,22 +0,0 @@
name: Check Commit
permissions: {}
on:
push:
branches:
- "*"
pull_request:
branches:
- "*"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
commit_lint:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
- uses: wagoid/commitlint-github-action@b948419dd99f3fd78a6548d48f94e3df7f6bf3ed # v6.2.1


@@ -1,37 +0,0 @@
name: "Check Pull Request"
on:
pull_request_target:
types:
- opened
- edited
- synchronize
permissions:
pull-requests: write
jobs:
main:
name: Validate PR title
runs-on: ubuntu-latest
steps:
- uses: amannn/action-semantic-pull-request@b439535a8eb2122b748ed2b45d1693aaabe5b0aa
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
types: |
chore
ci
docs
feat
fix
test
sec
requireScope: false
wip: false
# If the PR only contains a single commit, the action will validate that
# it matches the configured pattern.
validateSingleCommit: true
# Related to `validateSingleCommit` you can opt-in to validate that the PR
# title matches a single commit to avoid confusion.
validateSingleCommitMatchesPrTitle: true

.github/workflows/ci.yml (new file, 49 lines)

@@ -0,0 +1,49 @@
name: CI
on:
push:
branches: [ "*" ]
pull_request:
branches: [ "*" ]
jobs:
commit_lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: wagoid/commitlint-github-action@v2
with:
firstParent: true
golangci:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v2.3.0
with:
version: v1.45.2
only-new-issues: false
args: --timeout 2m --config .golangci.yml
diff:
name: diff
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: actions/setup-go@v2
with:
go-version: '1.18'
- run: make installer
- name: Checking if YAML installer file is not aligned
run: if [[ $(git diff | wc -l) -gt 0 ]]; then echo ">>> Untracked generated files have not been committed" && git --no-pager diff && exit 1; fi
- run: make apidoc
- name: Checking if the CRDs documentation is not aligned
run: if [[ $(git diff | wc -l) -gt 0 ]]; then echo ">>> CRDs generated documentation have not been committed" && git --no-pager diff && exit 1; fi
- name: Checking if YAML installer generated untracked files
run: test -z "$(git ls-files --others --exclude-standard 2> /dev/null)"
- name: Checking if source code is not formatted
run: test -z "$(git diff 2> /dev/null)"


@@ -1,86 +0,0 @@
name: Coverage
on:
push:
branches:
- "main"
pull_request:
types: [opened, reopened, synchronize]
branches:
- "main"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
compliance:
name: "License Compliance"
runs-on: ubuntu-24.04
steps:
- name: "Checkout Code"
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Check secret
id: checksecret
uses: ./.github/actions/exists
with:
value: ${{ secrets.FOSSA_API_KEY }}
- name: "Run FOSSA Scan"
if: steps.checksecret.outputs.result == 'true'
uses: fossas/fossa-action@3ebcea1862c6ffbd5cf1b4d0bd6b3fe7bd6f2cac # v1.7.0
with:
api-key: ${{ secrets.FOSSA_API_KEY }}
- name: "Run FOSSA Test"
if: steps.checksecret.outputs.result == 'true'
uses: fossas/fossa-action@3ebcea1862c6ffbd5cf1b4d0bd6b3fe7bd6f2cac # v1.7.0
with:
api-key: ${{ secrets.FOSSA_API_KEY }}
run-tests: true
sast:
name: "SAST"
runs-on: ubuntu-24.04
env:
GO111MODULE: on
permissions:
security-events: write
actions: read
contents: read
steps:
- name: Checkout Source
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version-file: 'go.mod'
- name: Run Gosec Security Scanner
uses: securego/gosec@424fc4cd9c82ea0fd6bee9cd49c2db2c3cc0c93f # v2.22.11
with:
args: '-no-fail -fmt sarif -out gosec.sarif ./...'
- name: Upload SARIF file
uses: github/codeql-action/upload-sarif@8aac4e47ac8ace7d9e0e0b4ef7407aff0ceb5e87
with:
sarif_file: gosec.sarif
unit_tests:
name: "Unit tests"
runs-on: ubuntu-24.04
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version-file: 'go.mod'
- name: Unit Test
run: make test
- name: Check secret
id: checksecret
uses: ./.github/actions/exists
with:
value: ${{ secrets.CODECOV_TOKEN }}
- name: Upload Report to Codecov
if: ${{ steps.checksecret.outputs.result == 'true' }}
uses: codecov/codecov-action@671740ac38dd9b0130fbe1cec585b89eea48d3de # v5.5.2
with:
token: ${{ secrets.CODECOV_TOKEN }}
slug: projectcapsule/capsule
files: ./coverage.out
fail_ci_if_error: true
verbose: true


@@ -1,45 +0,0 @@
name: Build images
permissions: {}
on:
pull_request:
branches:
- "*"
paths:
- '.github/workflows/docker-*.yml'
- 'api/**'
- 'controllers/**'
- 'pkg/**'
- 'e2e/*'
- '.ko.yaml'
- 'go.*'
- 'main.go'
- 'Makefile'
jobs:
build-images:
runs-on: ubuntu-latest
permissions:
security-events: write
actions: read
contents: read
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: ko build
run: VERSION=${{ github.sha }} make ko-build-all
- name: Trivy Scan Image
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # 0.33.1
with:
scan-type: 'fs'
ignore-unfixed: true
format: 'sarif'
output: 'trivy-results.sarif'
severity: 'CRITICAL,HIGH'
env:
# Trivy is returning TOOMANYREQUESTS
# See: https://github.com/aquasecurity/trivy-action/issues/389#issuecomment-2385416577
TRIVY_DB_REPOSITORY: 'public.ecr.aws/aquasecurity/trivy-db:2'
- name: Upload Trivy scan results to GitHub Security tab
uses: github/codeql-action/upload-sarif@8aac4e47ac8ace7d9e0e0b4ef7407aff0ceb5e87
with:
sarif_file: 'trivy-results.sarif'

.github/workflows/docker-ci.yml (new file, 97 lines)

@@ -0,0 +1,97 @@
name: docker-ci
on:
push:
tags:
- "v*"
jobs:
docker-ci:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Generate build-args
id: build-args
run: |
# Declare vars for internal use
VERSION=$(git describe --abbrev=0 --tags)
GIT_HEAD_COMMIT=$(git rev-parse --short HEAD)
GIT_TAG_COMMIT=$(git rev-parse --short $VERSION)
GIT_MODIFIED_1=$(git diff $GIT_HEAD_COMMIT $GIT_TAG_COMMIT --quiet && echo "" || echo ".dev")
GIT_MODIFIED_2=$(git diff --quiet && echo "" || echo ".dirty")
# Export to GH_ENV
echo "GIT_LAST_TAG=$VERSION" >> $GITHUB_ENV
echo "GIT_HEAD_COMMIT=$GIT_HEAD_COMMIT" >> $GITHUB_ENV
echo "GIT_TAG_COMMIT=$GIT_TAG_COMMIT" >> $GITHUB_ENV
echo "GIT_MODIFIED=$(echo "$GIT_MODIFIED_1""$GIT_MODIFIED_2")" >> $GITHUB_ENV
echo "GIT_REPO=$(git config --get remote.origin.url)" >> $GITHUB_ENV
echo "BUILD_DATE=$(git log -1 --format="%at" | xargs -I{} date -d @{} +%Y-%m-%dT%H:%M:%S)" >> $GITHUB_ENV
- name: Docker meta
id: meta
uses: docker/metadata-action@v3
with:
images: |
quay.io/${{ github.repository }}
docker.io/${{ github.repository }}
tags: |
type=semver,pattern={{raw}}
flavor: |
latest=false
- name: Set up QEMU
id: qemu
uses: docker/setup-qemu-action@v1
with:
platforms: arm64,arm
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
with:
install: true
- name: Inspect builder
run: |
echo "Name: ${{ steps.buildx.outputs.name }}"
echo "Endpoint: ${{ steps.buildx.outputs.endpoint }}"
echo "Status: ${{ steps.buildx.outputs.status }}"
echo "Flags: ${{ steps.buildx.outputs.flags }}"
echo "Platforms: ${{ steps.buildx.outputs.platforms }}"
- name: Login to quay.io Container Registry
uses: docker/login-action@v1
with:
registry: quay.io
username: ${{ github.repository_owner }}+github
password: ${{ secrets.BOT_QUAY_IO }}
- name: Login to docker.io Container Registry
uses: docker/login-action@v1
with:
registry: docker.io
username: ${{ secrets.USER_DOCKER_IO }}
password: ${{ secrets.BOT_DOCKER_IO }}
- name: Build and push
id: build-release
uses: docker/build-push-action@v2
with:
file: Dockerfile
context: .
platforms: linux/amd64,linux/arm64,linux/arm
push: true
tags: ${{ steps.meta.outputs.tags }}
build-args: |
GIT_HEAD_COMMIT=${{ env.GIT_HEAD_COMMIT }}
GIT_TAG_COMMIT=${{ env.GIT_TAG_COMMIT }}
GIT_REPO=${{ env.GIT_REPO }}
GIT_LAST_TAG=${{ env.GIT_LAST_TAG }}
GIT_MODIFIED=${{ env.GIT_MODIFIED }}
BUILD_DATE=${{ env.BUILD_DATE }}
- name: Image digest
run: echo ${{ steps.build-release.outputs.digest }}


@@ -1,69 +0,0 @@
name: Publish images
permissions: {}
on:
push:
tags:
- "v*"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
publish-images:
runs-on: ubuntu-latest
permissions:
packages: write
id-token: write
outputs:
capsule-digest: ${{ steps.publish-capsule.outputs.digest }}
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Setup caches
uses: ./.github/actions/setup-caches
timeout-minutes: 5
continue-on-error: true
with:
build-cache-key: publish-images
- name: Run Trivy vulnerability (Repo)
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # 0.33.1
with:
scan-type: 'fs'
ignore-unfixed: true
format: 'sarif'
output: 'trivy-results.sarif'
severity: 'CRITICAL,HIGH'
- name: Install Cosign
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
- name: Publish Capsule
id: publish-capsule
uses: peak-scale/github-actions/make-ko-publish@a441cca016861c546ab7e065277e40ce41a3eb84 # v0.2.0
with:
makefile-target: ko-publish-capsule
registry: ghcr.io
registry-username: ${{ github.actor }}
registry-password: ${{ secrets.GITHUB_TOKEN }}
repository: ${{ github.repository_owner }}
version: ${{ github.ref_name }}
sign-image: true
sbom-name: capsule
sbom-repository: ghcr.io/${{ github.repository_owner }}/capsule
signature-repository: ghcr.io/${{ github.repository_owner }}/capsule
main-path: ./cmd/
env:
REPOSITORY: ${{ github.repository }}
generate-capsule-provenance:
needs: publish-images
permissions:
id-token: write # To sign the provenance.
packages: write # To upload assets to release.
actions: read # To read the workflow path.
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
with:
image: ghcr.io/${{ github.repository_owner }}/capsule
digest: "${{ needs.publish-images.outputs.capsule-digest }}"
registry-username: ${{ github.actor }}
secrets:
registry-password: ${{ secrets.GITHUB_TOKEN }}


@@ -1,33 +0,0 @@
name: docs-lint
permissions: {}
on:
push:
branches:
- "*"
paths:
- '.github/workflows/docs-lint.yml'
- 'docs/content/**'
pull_request:
branches:
- "*"
paths:
- '.github/workflows/docs-lint.yml'
- 'docs/content/**'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
spelling:
name: Spell Check
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
- uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0
with:
node-version: 18
- run: make docs-lint


@@ -1,15 +1,24 @@
name: e2e
permissions: {}
on:
pull_request:
branches:
- "*"
push:
branches: [ "*" ]
paths:
- '.github/workflows/e2e.yml'
- 'api/**'
- 'controllers/**'
- 'pkg/**'
- 'e2e/*'
- 'Dockerfile'
- 'go.*'
- 'main.go'
- 'Makefile'
pull_request:
branches: [ "*" ]
paths:
- '.github/workflows/e2e.yml'
- 'api/**'
- 'controllers/**'
- 'internal/**'
- 'pkg/**'
- 'e2e/*'
- 'Dockerfile'
@@ -17,51 +26,32 @@ on:
- 'main.go'
- 'Makefile'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
e2e:
name: E2E Testing (CE)
runs-on:
labels: ubuntu-latest-8-cores
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version-file: 'go.mod'
- uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4
- name: e2e
run: sudo make e2e
run-e2e:
name: E2E Testing
kind:
name: Kubernetes
strategy:
fail-fast: false
matrix:
k8s-version:
- 'v1.31.0'
- 'v1.32.0'
- 'v1.33.0'
- 'v1.34.0'
runs-on:
labels: ubuntu-latest-8-cores
k8s-version: ['v1.16.15', 'v1.17.11', 'v1.18.8', 'v1.19.4', 'v1.20.7', 'v1.21.2', 'v1.22.4', 'v1.23.6', 'v1.24.7', 'v1.25.3', 'v1.26.0']
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/checkout@v2
with:
repository: ${{ github.event.client_payload.repo }}
ref: ${{ github.event.client_payload.sha }}
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
fetch-depth: 0
- uses: actions/setup-go@v2
with:
go-version-file: 'go.mod'
- uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4
- name: e2e (Enterprise)
run: sudo KUBERNETES_SUPPORTED_VERSION=${{ matrix.k8s-version }} make e2e
go-version: '1.18'
- run: make manifests
- name: Checking if manifests are not aligned
run: test -z "$(git diff 2> /dev/null)"
- name: Checking if manifests generated untracked files
run: test -z "$(git ls-files --others --exclude-standard 2> /dev/null)"
- uses: engineerd/setup-kind@v0.5.0
with:
skipClusterCreation: true
version: v0.14.0
- uses: azure/setup-helm@v1
with:
version: 3.3.4
- name: e2e testing
run: make e2e/${{ matrix.k8s-version }}

.github/workflows/gosec.yml (new file, 18 lines)

@@ -0,0 +1,18 @@
name: CI gosec
on:
push:
branches: [ "*" ]
pull_request:
branches: [ "*" ]
jobs:
tests:
runs-on: ubuntu-latest
env:
GO111MODULE: on
steps:
- name: Checkout Source
uses: actions/checkout@v2
- name: Run Gosec Security Scanner
uses: securego/gosec@master
with:
args: ./...


@@ -1,82 +0,0 @@
name: Publish charts
permissions: read-all
on:
push:
tags:
- 'v*'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
publish-helm:
# Skip this Release on forks
if: github.repository_owner == 'projectcapsule'
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: "Extract Version"
id: extract_version
run: |
GIT_TAG=${GITHUB_REF##*/}
VERSION=${GIT_TAG##*v}
echo "version=$(echo $VERSION)" >> $GITHUB_OUTPUT
- name: Publish Helm chart
uses: stefanprodan/helm-gh-pages@0ad2bb377311d61ac04ad9eb6f252fb68e207260 # v1.7.0
with:
token: "${{ secrets.HELM_CHARTS_PUSH_TOKEN }}"
linting: off
chart_version: ${{ steps.extract_version.outputs.version }}
app_version: ${{ steps.extract_version.outputs.version }}
charts_dir: charts
charts_url: https://${{ github.repository_owner }}.github.io/charts
owner: ${{ github.repository_owner }}
repository: charts
branch: gh-pages
commit_username: ${{ github.actor }}
publish-helm-oci:
runs-on: ubuntu-24.04
permissions:
contents: write
id-token: write
packages: write
outputs:
chart-digest: ${{ steps.helm_publish.outputs.digest }}
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
- name: "Extract Version"
id: extract_version
run: |
GIT_TAG=${GITHUB_REF##*/}
VERSION=${GIT_TAG##*v}
echo "version=$(echo $VERSION)" >> $GITHUB_OUTPUT
- name: Helm | Publish
id: helm_publish
uses: peak-scale/github-actions/helm-oci-chart@a441cca016861c546ab7e065277e40ce41a3eb84 # v0.2.0
with:
registry: ghcr.io
repository: ${{ github.repository_owner }}/charts
name: "capsule"
version: ${{ steps.extract_version.outputs.version }}
app-version: ${{ steps.extract_version.outputs.version }}
registry-username: ${{ github.actor }}
registry-password: ${{ secrets.GITHUB_TOKEN }}
update-dependencies: 'true' # Defaults to false
sign-image: 'true'
signature-repository: ghcr.io/${{ github.repository_owner }}/charts/capsule
helm-provenance:
needs: publish-helm-oci
permissions:
id-token: write # To sign the provenance.
packages: write # To upload assets to release.
actions: read # To read the workflow path.
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
with:
image: ghcr.io/${{ github.repository_owner }}/charts/capsule
digest: "${{ needs.publish-helm-oci.outputs.chart-digest }}"
registry-username: ${{ github.actor }}
secrets:
registry-password: ${{ secrets.GITHUB_TOKEN }}


@@ -1,66 +0,0 @@
name: Test charts
permissions: {}
on:
pull_request:
branches:
- "main"
paths:
- '.github/configs/**'
- '.github/workflows/helm-*.yml'
- 'charts/**'
- 'Makefile'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
linter-artifacthub:
runs-on: ubuntu-latest
container:
image: artifacthub/ah
options: --user root
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Run ah lint
working-directory: ./charts/
run: ah lint
lint:
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
- uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4
- name: Linting Chart
run: helm lint ./charts/capsule
- name: Run chart-testing (lint)
run: make helm-lint
- name: Run docs-testing (helm-docs)
id: helm-docs
run: |
make helm-docs
if [[ $(git diff --stat) != '' ]]; then
echo -e '\033[0;31mDocumentation outdated! (Run make helm-docs locally and commit)\033[0m ❌'
git diff --color
exit 1
else
echo -e '\033[0;32mDocumentation up to date\033[0m ✔'
fi
- name: Run schema-testing (helm-schema)
id: helm-schema
run: |
make helm-schema
if [[ $(git diff --stat) != '' ]]; then
echo -e '\033[0;31mSchema outdated! (Run make helm-schema locally and commit)\033[0m ❌'
git diff --color
exit 1
else
echo -e '\033[0;32mSchema up to date\033[0m ✔'
fi
- name: Run chart-testing (install)
run: HELM_KIND_CONFIG="./hack/kind-cluster.yml" make helm-test

.github/workflows/helm.yml (new file, 86 lines)

@@ -0,0 +1,86 @@
name: Helm Chart
on:
push:
branches: [ "*" ]
tags: [ "helm-v*" ]
pull_request:
branches: [ "*" ]
jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- uses: azure/setup-helm@v1
with:
version: 3.3.4
- name: Linting Chart
run: helm lint ./charts/capsule
- name: Setup Chart Linting
id: lint
uses: helm/chart-testing-action@v2.3.0
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config ./.github/configs/ct.yaml)
if [[ -n "$changed" ]]; then
echo "::set-output name=changed::true"
fi
- name: Run chart-testing (lint)
run: ct lint --debug --config ./.github/configs/ct.yaml --lint-conf ./.github/configs/lintconf.yaml
- name: Run docs-testing (helm-docs)
id: helm-docs
run: |
make helm-docs
if [[ $(git diff --stat) != '' ]]; then
echo -e '\033[0;31mDocumentation outdated! (Run make helm-docs locally and commit)\033[0m ❌'
git diff --color
exit 1
else
echo -e '\033[0;32mDocumentation up to date\033[0m ✔'
fi
# ATTENTION: This is a workaround for the upcoming ApiVersion Conversions for the capsule CRDs
# With this workflow the current docker image is built and loaded into kind, otherwise the install fails
# In the future this must be removed and the chart-testing-action must be used
- name: Run chart-testing (install)
run: make helm-test
if: steps.list-changed.outputs.changed == 'true'
## Create KIND Cluster
#- name: Create kind cluster
# uses: helm/kind-action@v1.2.0
# if: steps.list-changed.outputs.changed == 'true'
## Install Required Operators/CRDs
#- name: Prepare Cluster Operators/CRDs
# run: |
# # Cert-Manager CRDs
# kubectl create -f https://github.com/cert-manager/cert-manager/releases/download/v1.9.1/cert-manager.crds.yaml
#
# # Prometheus CRDs
# kubectl create -f https://github.com/prometheus-operator/prometheus-operator/releases/download/v0.58.0/bundle.yaml
# if: steps.list-changed.outputs.changed == 'true'
## Install Charts
#- name: Run chart-testing (install)
# run: ct install --debug --config ./.github/configs/ct.yaml
# if: steps.list-changed.outputs.changed == 'true'
release:
if: startsWith(github.ref, 'refs/tags/helm-v')
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Publish Helm chart
uses: stefanprodan/helm-gh-pages@master
with:
token: ${{ secrets.BOT_GITHUB_TOKEN }}
charts_dir: charts
charts_url: https://clastix.github.io/charts
owner: clastix
repository: charts
branch: gh-pages
target_dir: .
commit_username: prometherion
commit_email: dario@tranchitella.eu


@@ -1,53 +0,0 @@
name: Linting
permissions: {}
on:
push:
branches:
- "*"
pull_request:
branches:
- "*"
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
manifests:
name: diff
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version-file: 'go.mod'
- name: Generate manifests
run: |
make generate
make manifests
if [[ $(git diff --stat) != '' ]]; then
echo -e '\033[0;31mManifests outdated! (Run make manifests locally and commit)\033[0m ❌'
git diff --color
exit 1
else
echo -e '\033[0;32mManifests up to date\033[0m ✔'
fi
yamllint:
name: yamllint
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Install yamllint
run: pip install yamllint
- name: Lint YAML files
run: yamllint -c=.github/configs/lintconf.yaml .
golangci:
name: lint
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version-file: 'go.mod'
- name: Run golangci-lint
run: make golint


@@ -1,42 +0,0 @@
name: Go Release
permissions: {}
on:
push:
tags:
- 'v*'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
create-release:
runs-on: ubuntu-latest
permissions:
contents: write
id-token: write
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version-file: 'go.mod'
- name: Setup caches
uses: ./.github/actions/setup-caches
timeout-minutes: 5
continue-on-error: true
- uses: creekorful/goreportcard-action@1f35ced8cdac2cba28c9a2f2288a16aacfd507f9 # v1.0
- uses: anchore/sbom-action/download-syft@5620efe7f17de3b70cbc020fc49ce9048f1bbacf
- name: Install Cosign
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@e435ccd777264be153ace6237001ef4d979d3a7a # v6.4.0
with:
version: latest
args: release --clean --timeout 90m
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -1,42 +0,0 @@
name: Scorecards supply-chain security
permissions: {}
on:
schedule:
- cron: '0 0 * * 5'
push:
branches:
- main
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
analysis:
runs-on: ubuntu-latest
permissions:
security-events: write
id-token: write
steps:
- name: Checkout
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
with:
persist-credentials: false
- name: Run analysis
uses: ossf/scorecard-action@4eaacf0543bb3f2c246792bd56e8cdeffafb205a # v2.4.3
with:
results_file: results.sarif
results_format: sarif
repo_token: ${{ secrets.SCORECARD_READ_TOKEN }}
publish_results: true
- name: Upload artifact
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: SARIF file
path: results.sarif
retention-days: 5
- name: Upload to code-scanning
uses: github/codeql-action/upload-sarif@6bc82e05fd0ea64601dd4b465378bbcf57de0314 # v4.32.1
with:
sarif_file: results.sarif


@@ -1,26 +0,0 @@
name: Stale-Bot
permissions: {}
on:
schedule:
- cron: '0 0 * * *' # Run every day at midnight
jobs:
stale:
runs-on: ubuntu-latest
permissions:
actions: write
contents: write # only for delete-branch option
issues: write
pull-requests: write
steps:
- name: Close stale pull requests
uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f
with:
stale-issue-message: 'This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 30 days.'
stale-pr-message: 'This pull request has been marked as stale because it has been inactive for more than 30 days. Please update this pull request or it will be automatically closed in 30 days.'
days-before-issue-stale: 60
days-before-pr-stale: 30
days-before-issue-close: 30
days-before-pr-close: 30
stale-pr-label: stale

.gitignore (4 lines changed)

@@ -6,9 +6,6 @@
*.so
*.dylib
bin
dist/
config/
builds/
# Test binary, build with `go test -c`
*.test
@@ -33,4 +30,3 @@ builds/
.DS_Store
*.tgz
kind.yaml
capsule-seccomp.json


@@ -1,91 +0,0 @@
version: "2"
run:
tests: false
allow-parallel-runners: true
linters:
default: all
disable:
- godoclint
- depguard
- err113
- exhaustruct
- funlen
- gochecknoglobals
- gochecknoinits
- ireturn
- lll
- mnd
- nilnil
- nonamedreturns
- paralleltest
- perfsprint
- recvcheck
- testpackage
- unparam
- varnamelen
- wrapcheck
- interfacebloat
- noinlineerr
- revive
settings:
cyclop:
max-complexity: 27
dupl:
threshold: 100
gocognit:
min-complexity: 50
goconst:
min-len: 2
min-occurrences: 2
goheader:
template: |-
Copyright 2020-2026 Project Capsule Authors
SPDX-License-Identifier: Apache-2.0
inamedparam:
skip-single-param: true
nakedret:
max-func-lines: 50
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
paths:
- zz_.*\.go$
- .+\.generated.go
- .+_test.go
- .+_test_.+.go
- third_party$
- builtin$
- examples$
rules:
- path: pkg/meta/
linters:
- dupl
formatters:
enable:
- gci
- gofmt
- gofumpt
- goimports
settings:
gci:
sections:
- standard
- default
- prefix(github.com/projectcapsule/capsule)
gofumpt:
module-path: github.com/projectcapsule/capsule
extra-rules: false
exclusions:
generated: lax
paths:
- zz_.*\.go$
- .+\.generated.go
- .+_test.go
- .+_test_.+.go
- third_party$
- builtin$
- examples$

.golangci.yml (new file, 48 lines)

@@ -0,0 +1,48 @@
linters-settings:
govet:
check-shadowing: true
dupl:
threshold: 100
goconst:
min-len: 2
min-occurrences: 2
cyclop:
max-complexity: 27
gocognit:
min-complexity: 50
gci:
sections:
- standard
- default
- prefix(github.com/clastix/capsule)
goheader:
template: |-
Copyright 2020-2021 Clastix Labs
SPDX-License-Identifier: Apache-2.0
linters:
enable-all: true
disable:
- funlen
- gochecknoinits
- lll
- exhaustivestruct
- maligned
- interfacer
- scopelint
- golint
- gochecknoglobals
- goerr113
- gomnd
- paralleltest
- ireturn
- testpackage
- varnamelen
- wrapcheck
service:
golangci-lint-version: 1.45.2
run:
skip-files:
- "zz_.*\\.go$"


@@ -1,128 +0,0 @@
project_name: capsule
env:
- COSIGN_EXPERIMENTAL=true
- GO111MODULE=on
before:
hooks:
- go mod download
gomod:
proxy: false
builds:
- id: "{{ .ProjectName }}"
main: ./cmd/
binary: "{{ .ProjectName }}-{{ .Os }}-{{ .Arch }}"
env:
- CGO_ENABLED=0
goarch:
- amd64
- arm64
goos:
- linux
flags:
- -trimpath
mod_timestamp: '{{ .CommitTimestamp }}'
ldflags:
- >-
-X main.Version={{ .Tag }}
-X main.GitCommit={{ .Commit }}
-X main.GitTag={{ .Tag }}
-X main.GitDirty={{ .Date }}
-X main.BuildTime={{ .Date }}
-X main.GitRepo={{ .ProjectName }}
# - id: "{{ .ProjectName }}-wasm"
# main: ./cmd/
# binary: "{{ .ProjectName }}.wasm"
# env:
# - CGO_ENABLED=0
# goos:
# - js
# goarch:
# - wasm
# flags:
# - -trimpath
# mod_timestamp: '{{ .CommitTimestamp }}'
# ldflags:
# - >-
# -X main.Version={{ .Tag }}
# -X main.GitCommit={{ .Commit }}
# -X main.GitTag={{ .Tag }}
# -X main.GitDirty={{ .Date }}
# -X main.BuildTime={{ .Date }}
# -X main.GitRepo={{ .ProjectName }}
release:
prerelease: auto
footer: |
**Full Changelog**: https://github.com/projectcapsule/{{ .ProjectName }}/compare/{{ .PreviousTag }}...{{ .Tag }}
[Check out what's new in this release](https://projectcapsule.dev/docs/whats-new/)
**Docker Images**
- `ghcr.io/projectcapsule/{{ .ProjectName }}:{{ .Version }}`
- `ghcr.io/projectcapsule/{{ .ProjectName }}:latest`
**Helm Chart**
View this release on [Artifact Hub](https://artifacthub.io/packages/helm/projectcapsule/capsule/{{ .Version }}) or use the OCI helm chart:
- `ghcr.io/projectcapsule/charts/{{ .ProjectName }}:{{ .Version }}`
[Review the Major Changes section first before upgrading to a new version](https://artifacthub.io/packages/helm/projectcapsule/capsule/{{ .Version }}#major-changes)
> [!IMPORTANT]
> **Kubernetes compatibility**
>
> Note that the Capsule project offers support only for the latest minor version of Kubernetes.
> Backwards compatibility with older versions of Kubernetes and OpenShift is [offered by vendors](https://projectcapsule.dev/support/).
>
> | Kubernetes version | Minimum required |
> |--------------------|------------------|
> | `v1.35` | `>= 1.35.0` |
Thanks to all the contributors! 🚀 🦄
checksum:
name_template: 'checksums.txt'
changelog:
sort: asc
use: github
filters:
exclude:
- '^test:'
- '^chore'
- '^rebase:'
- 'merge conflict'
- Merge pull request
- Merge remote-tracking branch
- Merge branch
groups:
# https://github.com/conventional-changelog/commitlint/tree/master/%40commitlint/config-conventional
- title: '🛠 Dependency updates'
regexp: '^fix\(deps\):|^feat\(deps\):'
order: 300
- title: '✨ New Features'
regexp: '^feat(\([^)]*\))?:'
order: 100
- title: '🐛 Bug fixes'
regexp: '^fix(\([^)]*\))?:'
order: 200
- title: '📖 Documentation updates'
regexp: '^docs(\([^)]*\))?:'
order: 400
- title: '🛡️ Security updates'
regexp: '^sec(\([^)]*\))?:'
order: 500
- title: '🚀 Build process updates'
regexp: '^(build|ci)(\([^)]*\))?:'
order: 600
- title: '📦 Other work'
regexp: '^chore(\([^)]*\))?:|^chore:'
order: 9999
sboms:
- artifacts: archive
signs:
- cmd: cosign
args:
- "sign-blob"
- "--output-signature=${signature}"
- "${artifact}"
- "--yes"
artifacts: all


@@ -1,9 +0,0 @@
defaultPlatforms:
- linux/arm64
- linux/amd64
- linux/arm
builds:
- id: capsule
main: ./cmd/
ldflags:
- '{{ if index .Env "LD_FLAGS" }}{{ .Env.LD_FLAGS }}{{ end }}'


@@ -1,15 +0,0 @@
nwa:
cmd: "update"
holder: "Project Capsule Authors"
year: "2020-2026"
spdxids: "Apache-2.0"
path:
- "pkg/**/*.go"
- "cmd/**/*.go"
- "api/**/*.go"
- "internal/**/*.go"
- "controllers/**/*.go"
- "main.go"
mute: false
verbose: true
fuzzy: true


@@ -1,46 +0,0 @@
repos:
- repo: https://github.com/alessandrojcm/commitlint-pre-commit-hook
rev: v9.24.0
hooks:
- id: commitlint
stages: [commit-msg]
additional_dependencies: ['@commitlint/config-conventional', 'commitlint-plugin-function-rules']
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v6.0.0
hooks:
- id: check-executables-have-shebangs
- id: double-quote-string-fixer
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/adrienverge/yamllint
rev: v1.38.0
hooks:
- id: yamllint
args: [-c=.github/configs/lintconf.yaml]
- repo: local
hooks:
- id: run-helm-docs
name: Execute helm-docs
entry: make helm-docs
language: system
files: ^charts/
- id: run-helm-schema
name: Execute helm-schema
entry: make helm-schema
language: system
files: ^charts/
- id: run-helm-lint
name: Execute helm-lint
entry: make helm-lint
language: system
files: ^charts/
- id: golangci-lint
name: Execute golangci-lint
entry: make golint
language: system
files: \.go$
- id: go-test
name: Execute go test
entry: make test
language: system
files: \.go$


@@ -2,48 +2,10 @@
This is a list of companies that have adopted Capsule; feel free to open a Pull Request to get yours listed.
[See all on the website](https://projectcapsule.dev/adopters/)
## Adopters list (alphabetically)
### [Bedag Informatik AG](https://www.bedag.ch/)
<img src="https://www.bedag.ch/wGlobal/wGlobal/layout/images/logo.svg" alt="Bedag" width="350" />
### [Department of Defense](https://www.defense.gov/)
<img src="https://www.access-board.gov/images/dod-seal.png" alt="United States Department of Defense" width="350" />
### [Enreach](https://www.enreach.com/)
<img src="https://campaigns.enreach.com/hubfs/Global/logos/Enreach-logo-vertical-indigo.svg" alt="Enreach" width="350" />
### [Fastweb](https://www.fastweb.it/)
<img src="https://www.fastweb.it/var/storage_feeds/CMS-Company/articoli/0c2/0c252987b90a18017dedf2ed9feda129/640x360.jpg" alt="Fastweb" width="350" />
### [Klarrio](https://klarrio.com/)
<img src="https://klarrio.com/wp-content/uploads/klarrio.png" alt="Klarrio" width="350" />
### [KubeRocketCI](https://docs.kuberocketci.io/)
<img src="https://raw.githubusercontent.com/epam/edp-install/master/docs/assets/krci-logo-267×150-white.png" alt="KubeRocketCI" width="350" />
### [ODC-Noord](https://odc-noord.nl/)
<img src="./assets/customer_logo/odc-noord-logo.png" alt="ODC-Noord" width="350" />
### [PITS Global Data Recovery Services](https://www.pitsdatarecovery.net)
<img src="https://www.pitsdatarecovery.net/wp-content/uploads/2020/09/pits-logo.svg" alt="PITS Global Data Recovery Services" width="350" />
### [Politecnico di Torino](https://www.polito.it/)
<img src="https://www.polito.it/themes/custom/polito/polito_logo_desktop.svg" alt="Politecnico di Torino" width="350" />
### [Reevo](https://www.reevo.it/)
<img src="https://www.reevo.it/hs-fs/hubfs/logo_reevo_azzurro.png" alt="Reevo Cloud and CyberSecurity" width="350" />
### [Seeweb](https://seeweb.it/en)
<img src="https://www.seeweb.it/assets/images/logo-seeweb.svg" alt="Seeweb x Serverless GPU" width="350" />
### [University of Torino](https://www.unito.it)
<img src="https://www.unito.it/sites/all/themes/bsunito/img/logo_new_2022.svg" alt="University of Torino" width="350" />
### [Velocity](https://velocity.tech/)
<img src="https://raw.githubusercontent.com/yarelm/velocity-logo/main/velocity.png" alt="Velocity" width="350" />
### [Wargaming.net](https://www.wargaming.net/)
<img src="https://download.logo.wine/logo/Wargaming_%28company%29/Wargaming_%28company%29-Logo.wine.png" alt="Wargaming.net" width="350" />


@@ -1,10 +0,0 @@
# Changelog
Changes are published with their type and scope for each release in the release description. Changes are assigned based on their commit description. Read more on how commits should be formatted in the [Contributing](CONTRIBUTING.md#commits) guide.
See the [Releases](https://github.com/projectcapsule/capsule/releases)
## Helm Chart
For the helm chart, a dedicated changelog is created based on the chart's annotations ([See](./DEVELOPMENT.md#helm-changelog)).


@@ -1,7 +1,5 @@
# Contributor Covenant Code of Conduct
Capsule follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
@@ -116,7 +114,7 @@ the community.
## Attribution
This Code of Conduct follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) and is adapted from the [Contributor Covenant][homepage],
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.


@@ -1,188 +0,0 @@
# Contributing
All contributions are welcome! If you find a bug or have a feature request, please open an issue or submit a pull request.
## Ways to contribute
### 1. Report Issues
Issues filed against Capsule help improve the project in multiple ways, including the following:
* Report potential bugs
* Request a feature
* Request a sample policy
### 2. Engagement
Engage with the community on [Slack](https://kubernetes.slack.com/archives/C03GETTJQRL) and help new users with questions or issues they may have.
### 3. Submit changes
Submit technical changes via pull requests. New contributors can view all open issues labeled as [good first issues](https://github.com/projectcapsule/capsule/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22), which allow you to get started in an approachable manner.
Once you wish to get started contributing to the code base, please refer to our [development guide](DEVELOPMENT.md) for a how-to. **[We accept pull requests from forks only](#create-a-pull-request)**.
Before creating a pull request, please ensure that your changes are tested and that the documentation is updated accordingly.
When creating a pull request, please visit:
* [commits](#commits)
## Guidelines
The following guidelines outline the semantics and processes which apply to technical contributions to the project.
## Supported Versions
Versions follow [Semantic Versioning](https://semver.org/) terminology and are expressed as `x.y.z`:
- where x is the major version
- y is the minor version
- and z is the patch version
Security fixes may be backported to the three most recent minor releases, depending on severity and feasibility.
Prereleases are marked as `-rc.x` (release candidate) and may refer to any type of version bump.
## Pull Requests
The pull request title is checked according to the described [semantics](#semantics) (pull requests don't require a scope). However, pull requests are currently not used to generate the changelog. Check that your pull request's body meets the following criteria:
- reference a previously opened issue: https://docs.github.com/en/github/writing-on-github/autolinked-references-and-urls#issues-and-pull-requests
- split changes into several small, documented commits
- limit the git subject to 50 characters and write as the continuation of the
sentence "If applied, this commit will ..."
- explain what and why in the body, if more than a trivial change, wrapping at
72 characters
If your pull request is in a draft state and not yet ready for review, you can prefix the title with `[WIP]` to indicate that work is still ongoing:
[WIP] feat(controller): new cool feature
### Create a Pull Request
Head over to the project repository on GitHub and click the **"Fork"** button. With the forked copy, you can try new ideas and implement changes to the project.
1. **Clone the repository to your device:**
Copy the link of your forked repository, paste it into your terminal, and clone it using the command:
```sh
git clone https://hostname/YOUR-USERNAME/YOUR-REPOSITORY
```
2. **Create a branch:**
Create a new branch and navigate to it using this command.
```sh
git checkout -b <new-branch>
```
3. **Stage, Commit, and Push changes:**
Now that we have implemented the required changes, use the command below to stage the changes and commit them.
```sh
git add .
```
```sh
git commit -s -m "Commit message"
```
Go ahead and push your changes to GitHub using this command.
```sh
git push
```
## Commits
The commit message is checked according to the described [semantics](#semantics). Commits are used to generate the changelog and their author will be referenced in the changelog.
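For illustration, a commit message that follows these rules (subject under 50 characters, body wrapped at 72, sign-off) might look like this — the change described is invented, and the subject line is borrowed from the example in the next section:

```
feat(chart): add nodeselector value

Expose a nodeSelector value in the Helm chart so that the controller
can be scheduled onto dedicated nodes.

Signed-off-by: Random J Developer <random@developer.example.org>
```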
### Reorganising commits
To reorganise your commits, do the following (or use your own preferred approach):
1. Pull upstream changes
```bash
git remote add upstream git@github.com:projectcapsule/capsule.git
git pull upstream main
```
2. Pick the current upstream HEAD (the commit is marked with `(remote/main, main)`)
```bash
git log
....
commit 10bbf39ac1ac3ad4f8485422e54faa9aadf03315 (remote/main, main)
Author: Oliver Bähler <oliverbaehler@hotmail.com>
Date: Mon Oct 23 10:24:44 2023 +0200
docs(repo): add sbom reference
Signed-off-by: Oliver Bähler <oliverbaehler@hotmail.com>
```
3. Soft reset to the commit of the upstream HEAD
```bash
git reset --soft 10bbf39ac1ac3ad4f8485422e54faa9aadf03315
```
4. Unstage files (if any)
```bash
git restore --staged .
```
5. Add files manually and create new [commits](#commits), until all files are included
```bash
git add charts/capsule/
git commit -s -m "feat(chart): add nodeselector value"
...
```
6. Force push the changes to your fork
```bash
git push origin main -f
```
### Sign-Off
Developer Certificate of Origin (DCO) Sign off
For contributors to certify that they wrote or otherwise have the right to submit the code they are contributing to the project, we require everyone to acknowledge this by signing their work, which indicates you agree to the DCO found here.
To sign your work, just add a line like this at the end of your commit message:
Signed-off-by: Random J Developer <random@developer.example.org>
This can easily be done with the -s command line option to append this automatically to your commit message.
git commit -s -m 'This is my commit message'
## Semantics
The semantics should indicate the change and its impact. The general format for commit messages and pull requests is the following:
feat(ui): Add `Button` component
^ ^ ^
| | |__ Subject
| |_______ Scope
|____________ Type
Commits are checked on pull requests. If a commit message does not follow the format, the workflow will fail. See [Types](#types) for the supported types. The scope is not required, but helps provide more context for your changes. Try to use a scope if possible.
### Types
The following types are allowed for commits and pull requests:
* `chore`: housekeeping changes, no production code change
* `ci`: changes to building process/workflows
* `docs`: changes to documentation
* `feat`: new features
* `fix`: bug fixes
* `test`: test related changes
* `sec`: security related changes


@@ -1,47 +0,0 @@
# Environment Dependencies Policy
## Purpose
This policy describes how Capsule maintainers consume third-party packages.
## Scope
This policy applies to all Capsule maintainers and all third-party packages used in the Capsule project.
## Policy
Capsule maintainers must follow these guidelines when consuming third-party packages:
- Only use third-party packages that are necessary for the functionality of Capsule.
- Use the latest version of all third-party packages whenever possible.
- Avoid using third-party packages that are known to have security vulnerabilities.
- Pin all third-party packages to specific versions in the Capsule codebase.
- Use a dependency management tool, such as Go modules, to manage third-party dependencies.
- Dependencies must pass all automated tests before being merged into the Capsule codebase.
## Procedure
When adding a new third-party package to Capsule, maintainers must follow these steps:
1. Evaluate the need for the package. Is it necessary for the functionality of Capsule?
2. Research the package. Is it well-maintained? Does it have a good reputation?
3. Choose a version of the package. Use the latest version whenever possible.
4. Pin the package to the specific version in the Capsule codebase (see the sketch after this list).
5. Update the Capsule documentation to reflect the new dependency.
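A minimal sketch of pinning with Go modules; the module and version here are taken from the dependency bump in the commit shown in this compare:

```bash
# Record the exact version in go.mod and go.sum
go get github.com/emicklei/go-restful@v2.16.0

# Tidy the module graph and run the tests that must pass before merging
go mod tidy
go test ./...
```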
## Archive/Deprecation
When a third-party package is discontinued, the Capsule maintainers must ensure the package is replaced with a suitable alternative.
## Enforcement
This policy is enforced by the Capsule maintainers.
Maintainers are expected to review each other's code changes to ensure that they comply with this policy.
## Exceptions
Exceptions to this policy may be granted by the Capsule project lead on a case-by-case basis.
## Credits
This policy was adapted from the [Kubescape Community](https://github.com/kubescape/kubescape/blob/master/docs/environment-dependencies-policy.md)


@@ -1,236 +0,0 @@
# Development
Our Makefile helps you with the development of new changes or fixes. [You may have a look at it](./Makefile), since not all targets are documented.
To test your changes, you can run the binary locally. This runs just the Capsule controller. We recommend [setting up a development environment](#development-environment) for a better development experience:
```bash
make run
```
## Building
You can build the docker image locally; Ko will be installed via Go, so you don't need to install it manually.
```bash
make ko-build-all
```
This will push the build to your local docker images.
## Test
Execute unit testing:
```bash
make test
```
## E2E Test
**New changes always require dedicated E2E tests. E2E tests help us ensure the quality of the code and its functionality.**
For E2E tests we use the [ginkgo](https://github.com/onsi/ginkgo) framework. You can see all the tests under [e2e](./e2e/).
With the following command a new KinD cluster is created with the Kubernetes version `v1.20.7` (this can be done with any available Kubernetes version). A docker image is built and loaded into the KinD cluster, then the E2E tests are executed against it.
```bash
make e2e/v1.20.7
```
You can also run just the e2e tests, without creating a new KinD cluster:
```
make e2e-exec
```
The E2E tests are also executed via the [github workflow](./.github/workflows/e2e.yaml) on every PR and push to the main branch.
# Development Environment
During development, we prefer that the code is running within our IDE locally, instead of running as the normal Pod(s) within the Kubernetes cluster.
Such a setup can be illustrated with the diagram below:
![Development Environment](./assets/docs/dev-env.png)
## Setup Development Environment
To achieve that, there are some necessary steps we need to walk through, which have been bundled as a make target within our Makefile.
So the TL;DR answer is:
**Make sure a *KinD* cluster is running on your laptop, and then run `make dev-setup` to set up the dev environment.** Creating the KinD cluster itself is not done by `make dev-setup`.
```bash
# Create a KinD cluster if not already created
$ make dev-cluster
# To retrieve your laptop's IP and execute `make dev-setup` to setup dev env
# For example: LAPTOP_HOST_IP=192.168.10.101 make dev-setup
$ LAPTOP_HOST_IP="<YOUR_LAPTOP_IP>" make dev-setup
# Monitoring Setup (Grafana/Prometheus/Pyroscope)
$ LAPTOP_HOST_IP="<YOUR_LAPTOP_IP>" make dev-setup-monitoring
```
### Setup
We recommend setting up the development environment with the `make dev-setup` target. However, here is a step-by-step walkthrough of what it does, for understanding.
1. Scaling down the deployed Pod(s) to 0
We need to scale the existing replicas of capsule-controller-manager to 0 to avoid reconciliation competition between the Pod(s) and the code running outside of the cluster, in our preferred IDE for example.
```bash
$ kubectl -n capsule-system scale deployment capsule-controller-manager --replicas=0
deployment.apps/capsule-controller-manager scaled
```
2. Preparing TLS certificate for the webhooks
Running webhooks requires TLS; we can prepare the TLS key pair in our development environment to handle HTTPS requests.
```bash
# Prepare a simple OpenSSL config file
# Do remember to export LAPTOP_HOST_IP before running this command
$ cat > _tls.cnf <<EOF
[ req ]
default_bits = 4096
distinguished_name = req_distinguished_name
req_extensions = req_ext
[ req_distinguished_name ]
countryName = SG
stateOrProvinceName = SG
localityName = SG
organizationName = CAPSULE
commonName = CAPSULE
[ req_ext ]
subjectAltName = @alt_names
[alt_names]
IP.1 = ${LAPTOP_HOST_IP}
EOF
# Create this dir to mimic the Pod mount point
$ mkdir -p /tmp/k8s-webhook-server/serving-certs
# Generate the TLS cert/key under /tmp/k8s-webhook-server/serving-certs
$ openssl req -newkey rsa:4096 -days 3650 -nodes -x509 \
-subj "/C=SG/ST=SG/L=SG/O=CAPSULE/CN=CAPSULE" \
-extensions req_ext \
-config _tls.cnf \
-keyout /tmp/k8s-webhook-server/serving-certs/tls.key \
-out /tmp/k8s-webhook-server/serving-certs/tls.crt
# Clean it up
$ rm -f _tls.cnf
```
3. Patching the Webhooks
By default, the webhooks will be registered with the services, which will route to the Pods, inside the cluster. We need to delegate the controllers' and webhook's services to the code running in our IDE by patching the `MutatingWebhookConfiguration` and `ValidatingWebhookConfiguration`.
```bash
# Export your laptop's IP with the 9443 port exposed by controllers/webhooks' services
$ export WEBHOOK_URL="https://${LAPTOP_HOST_IP}:9443"
# Export the cert we just generated as the CA bundle for webhook TLS
$ export CA_BUNDLE=`openssl base64 -in /tmp/k8s-webhook-server/serving-certs/tls.crt | tr -d '\n'`
kubectl patch MutatingWebhookConfiguration capsule-mutating-webhook-configuration \
--type='json' -p="[\
{'op': 'replace', 'path': '/webhooks/0/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/defaults\",'caBundle':\"${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/1/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/defaults\",'caBundle':\"${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/2/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/defaults\",'caBundle':\"${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/3/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/namespace-owner-reference\",'caBundle':\"${CA_BUNDLE}\"}}\
]"
kubectl patch ValidatingWebhookConfiguration capsule-validating-webhook-configuration \
--type='json' -p="[\
{'op': 'replace', 'path': '/webhooks/0/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/cordoning\",'caBundle':\"${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/1/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/ingresses\",'caBundle':\"${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/2/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/namespaces\",'caBundle':\"${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/3/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/networkpolicies\",'caBundle':\"${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/4/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/nodes\",'caBundle':\"${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/5/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/pods\",'caBundle':\"${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/6/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/persistentvolumeclaims\",'caBundle':\"${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/7/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/services\",'caBundle':\"${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/8/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/tenants\",'caBundle':\"${CA_BUNDLE}\"}}\
]"
kubectl patch crd tenants.capsule.clastix.io \
--type='json' -p="[\
{'op': 'replace', 'path': '/spec/conversion/webhook/clientConfig', 'value':{'url': \"${WEBHOOK_URL}\", 'caBundle': \"${CA_BUNDLE}\"}}\
]"
kubectl patch crd capsuleconfigurations.capsule.clastix.io \
--type='json' -p="[\
{'op': 'replace', 'path': '/spec/conversion/webhook/clientConfig', 'value':{'url': \"${WEBHOOK_URL}\", 'caBundle': \"${CA_BUNDLE}\"}}\
]"
```
## Running Capsule
When the Development Environment is set up, we can run Capsule controllers with webhooks outside of the Kubernetes cluster:
```bash
$ export NAMESPACE=capsule-system && export TMPDIR=/tmp/
$ go run .
```
To verify that, we can open a new shell and create a new Tenant:
```bash
$ kubectl apply -f - <<EOF
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
name: gas
spec:
owners:
- name: alice
kind: User
EOF
```
We should see output and logs in the `make run` console.
Now it's time to work through our familiar inner development loop in our preferred IDE. For example, if you're using [Visual Studio Code](https://code.visualstudio.com/), a `launch.json` file like the sketch below can be a good start.
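A minimal `launch.json` sketch under these assumptions: the VS Code Go extension is installed, and the `NAMESPACE`/`TMPDIR` values mirror the exports shown above (the configuration name is illustrative):

```json
{
  "version": "0.2.0",
  "configurations": [
    {
      "name": "Launch Capsule",
      "type": "go",
      "request": "launch",
      "mode": "auto",
      "program": "${workspaceFolder}",
      "env": {
        "NAMESPACE": "capsule-system",
        "TMPDIR": "/tmp/"
      }
    }
  ]
}
```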
## Helm Chart
You can test changes made to the helm chart locally; the checks are almost identical to those executed in the GitHub workflows.
Run chart linting (ct lint):
```bash
make helm-lint
```
Run chart tests (ct install). This creates a KinD cluster, builds the current image, loads it into the cluster, and installs the Helm chart:
```bash
make helm-test
```
### Documentation
Documentation of the chart is done with [helm-docs](https://github.com/norwoodj/helm-docs). Therefore, all documentation-relevant changes for the chart must be made in the [README.md.gotmpl](./charts/capsule/README.md.gotmpl) file. You can run this locally with the following command (requires a running Docker daemon):
```bash
make helm-docs
...
time="2023-10-23T13:45:08Z" level=info msg="Found Chart directories [charts/capsule]"
time="2023-10-23T13:45:08Z" level=info msg="Generating README Documentation for chart /helm-docs/charts/capsule"
```
This will update the documentation for the chart in the `README.md` file.
### Helm Changelog
The `version` of the chart does not require a bump, since it's driven by our release process. The `appVersion` of the chart is the version of the Capsule project; it should be bumped when a new Capsule version is released. This will be done by the maintainers.
To create a proper changelog for the Helm chart, all changes affecting the chart must be documented as chart annotations. See all the available [chart annotations](https://artifacthub.io/docs/topics/annotations/helm/).
The annotation can be provided in two different formats: a plain list of strings describing the changes, or a list of objects carrying extra structured information (see the example below). Feel free to use whichever better suits your needs; the UI experience will differ slightly depending on the choice. When using the list-of-objects format, the valid kinds are `added`, `changed`, `deprecated`, `removed`, `fixed`, and `security`.
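For illustration, a hedged sketch of the structured format in `Chart.yaml` (the change entries are made up):
```yaml
annotations:
  artifacthub.io/changes: |
    - kind: added
      description: New webhook for Tenant validation
    - kind: fixed
      description: RBAC rules for namespace deletion
```
With the plain-string format, each list item is just the description itself, e.g. `- Fixed RBAC rules for namespace deletion`.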

Dockerfile
View File

@@ -0,0 +1,40 @@
# Build the manager binary
FROM golang:1.18 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download
ARG TARGETARCH
ARG GIT_HEAD_COMMIT
ARG GIT_TAG_COMMIT
ARG GIT_LAST_TAG
ARG GIT_MODIFIED
ARG GIT_REPO
ARG BUILD_DATE
# Copy the go source
COPY main.go main.go
COPY version.go version.go
COPY api/ api/
COPY controllers/ controllers/
COPY pkg/ pkg/
# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=$TARGETARCH GO111MODULE=on go build \
-gcflags "-N -l" \
-ldflags "-X main.GitRepo=$GIT_REPO -X main.GitTag=$GIT_LAST_TAG -X main.GitCommit=$GIT_HEAD_COMMIT -X main.GitDirty=$GIT_MODIFIED -X main.BuildTime=$BUILD_DATE" \
-o manager
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER nonroot:nonroot
ENTRYPOINT ["/manager"]

View File

@@ -1,17 +0,0 @@
# Target Binary
ARG TARGET_IMAGE
FROM ${TARGET_IMAGE} AS target
# Inject Harpoon Image
FROM ghcr.io/alegrey91/harpoon:latest
WORKDIR /
COPY --from=target /ko-app/cmd ./manager
RUN chmod +x ./harpoon
ENTRYPOINT ["/harpoon", \
"capture", \
"-f", "main.main", \
"-E", "NAMESPACE=capsule-system", \
"-i", "2", \
"-c", "-e", \
"-S", "-D", "/tmp/results/", \
"--", "/manager"]

View File

@@ -1,133 +0,0 @@
# Capsule Project Governance
The **Capsule** project is dedicated to creating a multi-tenancy and policy-based framework for Kubernetes. This governance explains how the project is run.
- [Values](#values)
- [Maintainers](#maintainers)
- [Becoming a Maintainer](#becoming-a-maintainer)
- [Meetings](#meetings)
- [CNCF Resources](#cncf-resources)
- [Code of Conduct Enforcement](#code-of-conduct)
- [Security Response Team](#security-response-team)
- [Voting](#voting)
- [Modifications](#modifying-this-charter)
## Values
The Capsule and its leadership embrace the following values:
* Openness: Communication and decision-making happens in the open and is discoverable for future
reference. As much as possible, all discussions and work take place in public
Slack channels and open repositories.
* Fairness: All stakeholders have the opportunity to provide feedback and submit
contributions, which will be considered on their merits.
* Community over Product or Company: Sustaining and growing our community takes
priority over shipping code or sponsors' organizational goals. Each
contributor participates in the project as an individual.
* Community Before Individual Demand: As a community-driven open source project, we emphasize
the importance of collaboration and contribution. Maintainers and contributors work together towards the project's growth, not to serve unilateral user demands. Users demanding features or enhancements solely for their own benefit, without contributing to the effort, are not aligned with our community values.
* Inclusivity: We innovate through different perspectives and skill sets, which
can only be accomplished in a welcoming and respectful environment.
* Participation: Responsibilities within the project are earned through
participation, and there is a clear path up the contributor ladder into leadership
positions.
## Maintainers
Capsule Maintainers have write access to the [project GitHub repository](https://github.com/orgs/projectcapsule). They can merge their own patches or patches from others. The current maintainers
can be found in [MAINTAINERS.md](./MAINTAINERS.md). Maintainers collectively manage the project's
resources and contributors.
This privilege is granted with some expectation of responsibility: maintainers
are people who care about the Capsule project and want to help it grow and
improve. A maintainer is not just someone who can make changes, but someone who
has demonstrated their ability to collaborate with the team, get the most
knowledgeable people to review code and docs.
A maintainer is a contributor to the project's success and a citizen helping
the project succeed. The collective team of all Maintainers is known as the Maintainer Council, which
is the governing body for the project.
### Becoming a Maintainer
To become a Maintainer you need to demonstrate the following:
* commitment to the project:
* participate in discussions, contributions, code and documentation reviews,
* perform reviews for non-trivial pull requests,
* contribute non-trivial pull requests and have them merged,
* ability to write quality code and/or documentation,
* ability to collaborate with the team,
* understanding of how the team works (policies, processes for testing and code review, etc),
* understanding of the project's purpose, code base and coding and documentation style.
A new Maintainer must be proposed by an existing maintainer by sending a message to all the other existing Maintainers. A simple majority vote of existing Maintainers
approves the application. Maintainer nominations will be evaluated without prejudice
to employer or demographics.
Maintainers who are selected will be granted the necessary GitHub rights.
### Removing a Maintainer
Maintainers may resign at any time if they feel that they will not be able to
continue fulfilling their project duties.
Maintainers may also be removed for inactivity, failure to fulfill their
Maintainer responsibilities, violation of the Code of Conduct, or other reasons.
A Maintainer may be removed at any time by a 2/3 vote of the remaining maintainers.
Depending on the reason for removal, a Maintainer may be converted to Emeritus
status. Emeritus Maintainers will still be consulted on some project matters,
and can be rapidly returned to Maintainer status if their availability changes.
## Meetings
Time zones permitting, Maintainers are expected to participate in the public
developer meeting and/or public discussions.
Maintainers will also have closed meetings in order to discuss security reports
or Code of Conduct violations. Such meetings should be scheduled by any
Maintainer on receipt of a security issue or CoC report. All current Maintainers
must be invited to such closed meetings, except for any Maintainer who is
accused of a CoC violation.
## CNCF Resources
Any Maintainer may suggest a request for CNCF resources. A simple majority of Maintainers
approves the request. The Maintainers may also choose to delegate working with the CNCF to non-Maintainer community members, who will then be added to the [CNCF's Maintainer List](https://github.com/cncf/foundation/blob/main/project-maintainers.csv) for that purpose.
## Code of Conduct
[Code of Conduct](./CODE_OF_CONDUCT.md)
violations by community members will be discussed and resolved in private Maintainer meetings. If a Maintainer is directly involved in the report, the Maintainers will instead designate two Maintainers to work with the CNCF Code of Conduct Committee in resolving it.
## Security Response Team
The Maintainers will appoint a Security Response Team to handle security reports.
This committee may simply consist of the Maintainer Council themselves. If this
responsibility is delegated, the Maintainers will appoint a team of at least two
contributors to handle it. The Maintainers will review who is assigned to this
at least once a year.
The Security Response Team is responsible for handling all reports of security
holes and breaches according to the [security policy](TODO:Link to security.md).
## Voting
While most business in Capsule Project is conducted by "[lazy consensus](https://community.apache.org/committers/lazyConsensus.html)",
periodically the Maintainers may need to vote on specific actions or changes.
Any Maintainer may demand a vote be taken.
Most votes require a simple majority of all Maintainers to succeed, except where
otherwise noted. Two-thirds majority votes mean at least two-thirds of all
existing maintainers.
## Modifying this Charter
Changes to this Governance and its supporting documents may be approved by
a 2/3 vote of the Maintainers.

View File

@@ -1,14 +0,0 @@
The current Maintainers Group for the [TODO: Projectname] Project consists of:
| Name | Employer | Responsibilities |
| ------------------------- | ----------- | ---------------- |
| Adriano Pezzuto | Clastix | Maintainer |
| Dario Tranchitella | Clastix | Maintainer |
| Maksim Fedotov | Wargaming | Maintainer |
| Oliver Bähler | Peak Scale | Maintainer |
| Hristo Hristov | Vaerolabs | Maintainer |
| Massimiliano Giovagnoli | Proximus | Maintainer |
This list must be kept in sync with the [CNCF Project Maintainers list](https://github.com/cncf/foundation/blob/master/project-maintainers.csv).
See [the project Governance](GOVERNANCE.md) for how maintainers are selected and replaced.

Makefile
View File

@@ -1,30 +1,8 @@
# Version
GIT_HEAD_COMMIT ?= $(shell git rev-parse --short HEAD)
VERSION ?= $(or $(shell git describe --abbrev=0 --tags --match "v*" 2>/dev/null),$(GIT_HEAD_COMMIT))
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
# Defaults
REGISTRY ?= ghcr.io
REPOSITORY ?= projectcapsule/capsule
GIT_TAG_COMMIT ?= $(shell git rev-parse --short $(VERSION))
GIT_MODIFIED_1 ?= $(shell git diff $(GIT_HEAD_COMMIT) $(GIT_TAG_COMMIT) --quiet && echo "" || echo ".dev")
GIT_MODIFIED_2 ?= $(shell git diff --quiet && echo "" || echo ".dirty")
GIT_MODIFIED ?= $(shell echo "$(GIT_MODIFIED_1)$(GIT_MODIFIED_2)")
GIT_REPO ?= $(shell git config --get remote.origin.url)
BUILD_DATE ?= $(shell git log -1 --format="%at" | xargs -I{} sh -c 'if [ "$(shell uname)" = "Darwin" ]; then date -r {} +%Y-%m-%dT%H:%M:%S; else date -d @{} +%Y-%m-%dT%H:%M:%S; fi')
IMG_BASE ?= $(REPOSITORY)
IMG ?= $(IMG_BASE):$(VERSION)
CAPSULE_IMG ?= $(REGISTRY)/$(IMG_BASE)
CLUSTER_NAME ?= capsule
## Kubernetes Version Support
KUBERNETES_SUPPORTED_VERSION ?= "v1.35.0"
## Tool Binaries
KUBECTL ?= kubectl
HELM ?= helm
# Current Operator version
VERSION ?= $$(git describe --abbrev=0 --tags --match "v*")
# Default bundle image tag
BUNDLE_IMG ?= clastix/capsule:$(VERSION)-bundle
# Options for 'bundle-build'
ifneq ($(origin CHANNELS), undefined)
BUNDLE_CHANNELS := --channels=$(CHANNELS)
@@ -34,6 +12,9 @@ BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL)
endif
BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
# Image URL to use all building/pushing image targets
IMG ?= clastix/capsule:$(VERSION)
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
@@ -41,16 +22,20 @@ else
GOBIN=$(shell go env GOBIN)
endif
# Get information about git current status
GIT_HEAD_COMMIT ?= $$(git rev-parse --short HEAD)
GIT_TAG_COMMIT ?= $$(git rev-parse --short $(VERSION))
GIT_MODIFIED_1 ?= $$(git diff $(GIT_HEAD_COMMIT) $(GIT_TAG_COMMIT) --quiet && echo "" || echo ".dev")
GIT_MODIFIED_2 ?= $$(git diff --quiet && echo "" || echo ".dirty")
GIT_MODIFIED ?= $$(echo "$(GIT_MODIFIED_1)$(GIT_MODIFIED_2)")
GIT_REPO ?= $$(git config --get remote.origin.url)
BUILD_DATE ?= $$(git log -1 --format="%at" | xargs -I{} date -d @{} +%Y-%m-%dT%H:%M:%S)
all: manager
# Run tests
.PHONY: test
test: test-clean generate manifests test-clean
@GO111MODULE=on go test -race -v $(shell go list ./... | grep -v "e2e") -coverprofile coverage.out
.PHONY: test-clean
test-clean: ## Clean tests cache
@go clean -testcache
test: generate manifests
go test ./... -coverprofile cover.out
# Build manager binary
manager: generate golint
@@ -60,80 +45,65 @@ manager: generate golint
run: generate manifests
go run .
# Creates the single file to install Capsule without any external dependency
installer: manifests kustomize
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default > config/install.yaml
# Install CRDs into a cluster
install: installer
$(KUSTOMIZE) build config/crd | kubectl apply -f -
# Uninstall CRDs from a cluster
uninstall: installer
$(KUSTOMIZE) build config/crd | kubectl delete -f -
# Deploy controller in the configured Kubernetes cluster in ~/.kube/config
deploy: installer
kubectl apply -f config/install.yaml
# Remove controller in the configured Kubernetes cluster in ~/.kube/config
remove: installer
kubectl delete -f config/install.yaml
kubectl delete clusterroles.rbac.authorization.k8s.io capsule-namespace-deleter capsule-namespace-provisioner --ignore-not-found
kubectl delete clusterrolebindings.rbac.authorization.k8s.io capsule-namespace-deleter capsule-namespace-provisioner --ignore-not-found
# Generate manifests e.g. CRD, RBAC etc.
manifests: generate
$(CONTROLLER_GEN) crd paths="./..." output:crd:artifacts:config=charts/capsule/crds
manifests: controller-gen
$(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
# Generate code
generate: controller-gen
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
# Generate License Header
license-headers: nwa
$(NWA) config
apidoc: apidocs-gen
$(APIDOCS_GEN) crdoc --resources config/crd/bases --output docs/content/general/crds-apis.md --template docs/template/reference-cr.tmpl
# Helm
SRC_ROOT = $(shell git rev-parse --show-toplevel)
helm-controller-version:
$(eval VERSION := $(shell grep 'appVersion:' charts/capsule/Chart.yaml | awk '{print "v"$$2}'))
$(eval KO_TAGS := $(shell grep 'appVersion:' charts/capsule/Chart.yaml | awk '{print "v"$$2}'))
helm-docs: helm-doc
$(HELM_DOCS) --chart-search-root ./charts
helm-docs: HELMDOCS_VERSION := v1.11.0
helm-docs: docker
@docker run -v "$(SRC_ROOT):/helm-docs" jnorwood/helm-docs:$(HELMDOCS_VERSION) --chart-search-root /helm-docs
helm-lint: ct
@$(CT) lint --config .github/configs/ct.yaml --validate-yaml=false --all --debug
@ct lint --config $(SRC_ROOT)/.github/configs/ct.yaml --lint-conf $(SRC_ROOT)/.github/configs/lintconf.yaml --all --debug
helm-schema: helm-plugin-schema
cd charts/capsule && $(HELM) schema --use-helm-docs
helm-test: kind ct docker-build
@kind create cluster --wait=60s --name capsule-charts
@kind load docker-image --name capsule-charts ${IMG}
@kubectl create ns capsule-system
@ct install --config $(SRC_ROOT)/.github/configs/ct.yaml --namespace=capsule-system --all --debug
@kind delete cluster --name capsule-charts
helm-test: HELM_KIND_CONFIG ?= ""
helm-test: kind
@mkdir -p /tmp/results || true
@$(KIND) create cluster --wait=60s --name capsule-charts --image kindest/node:$(KUBERNETES_SUPPORTED_VERSION) --config ./hack/kind-cluster.yaml
@make helm-test-exec
@$(KIND) delete cluster --name capsule-charts
helm-test-exec: ct helm-controller-version ko-build-all
$(MAKE) e2e-load-image CLUSTER_NAME=capsule-charts IMAGE=$(CAPSULE_IMG) VERSION=v0.0.0
@$(KUBECTL) create ns capsule-system || true
$(MAKE) dev-install-deps
$(MAKE) dev-install-grafana-operator-crds
@$(CT) install --config $(SRC_ROOT)/.github/configs/ct.yaml --namespace=capsule-system --all --debug
docker:
@hash docker 2>/dev/null || {\
echo "You need docker" &&\
exit 1;\
}
# Setup development env
dev-build: kind
$(KIND) create cluster --wait=60s --name $(CLUSTER_NAME) --image kindest/node:$(KUBERNETES_SUPPORTED_VERSION) --config ./hack/kind-cluster.yaml
$(MAKE) dev-install-deps
.PHONY: dev-destroy
dev-destroy: kind
$(KIND) delete cluster --name capsule
dev-install-deps: dev-setup-fluxcd dev-setup-cert-manager dev-install-gw-api-crds wait-for-helmreleases
API_GW := none
API_GW_VERSION := v1.3.0
API_GW_LOOKUP := kubernetes-sigs/gateway-api
dev-install-gw-api-crds:
@$(KUBECTL) apply --force-conflicts --server-side=true -f https://github.com/$(API_GW_LOOKUP)/releases/download/$(API_GW_VERSION)/standard-install.yaml
GRAFANA := none
GRAFANA_VERSION := v5.18.0
GRAFANA_LOOKUP := grafana/grafana-operator
dev-install-grafana-operator-crds:
@$(KUBECTL) apply --force-conflicts --server-side=true -f https://github.com/grafana/grafana-operator/releases/download/$(GRAFANA_VERSION)/crds.yaml
PROMETHEUS := none
PROMETHEUS_VERSION := v0.88.0
PROMETHEUS_LOOKUP := prometheus-operator/prometheus-operator
dev-install-prometheus-crds:
@$(KUBECTL) apply --force-conflicts --server-side=true -f https://github.com/prometheus-operator/prometheus-operator/releases/download/$(PROMETHEUS_VERSION)/bundle.yaml
# Usage:
# LAPTOP_HOST_IP=<YOUR_LAPTOP_IP> make dev-setup
# For example:
# LAPTOP_HOST_IP=192.168.10.101 make dev-setup
@@ -155,7 +125,7 @@ IP.1 = $(LAPTOP_HOST_IP)
endef
export TLS_CNF
dev-setup:
$(KUBECTL) -n capsule-system scale deployment capsule-controller-manager --replicas=0 || true
kubectl -n capsule-system scale deployment capsule-controller-manager --replicas=0
mkdir -p /tmp/k8s-webhook-server/serving-certs
echo "$${TLS_CNF}" > _tls.cnf
openssl req -newkey rsa:4096 -days 3650 -nodes -x509 \
@@ -164,362 +134,143 @@ dev-setup:
-config _tls.cnf \
-keyout /tmp/k8s-webhook-server/serving-certs/tls.key \
-out /tmp/k8s-webhook-server/serving-certs/tls.crt
$(KUBECTL) create secret tls capsule-tls -n capsule-system \
--cert=/tmp/k8s-webhook-server/serving-certs/tls.crt\
--key=/tmp/k8s-webhook-server/serving-certs/tls.key || true
rm -f _tls.cnf
export WEBHOOK_URL="https://$${LAPTOP_HOST_IP}:9443"; \
export CA_BUNDLE=`openssl base64 -in /tmp/k8s-webhook-server/serving-certs/tls.crt | tr -d '\n'`; \
$(HELM) upgrade \
--dependency-update \
--force-conflicts \
--debug \
--install \
--namespace capsule-system \
--create-namespace \
--set 'crds.install=true' \
--set 'crds.exclusive=true'\
--set 'crds.createConfig=true'\
--set "tls.enableController=false"\
--set "webhooks.exclusive=true"\
--set "webhooks.hooks.nodes.enabled=true"\
--set "webhooks.service.url=$${WEBHOOK_URL}" \
--set "webhooks.service.caBundle=$${CA_BUNDLE}" \
capsule \
./charts/capsule || true
setup-monitoring: dev-setup-fluxcd
@$(KUBECTL) kustomize --load-restrictor='LoadRestrictionsNone' hack/distro/monitoring | envsubst | kubectl apply -f -
@$(KUBECTL) kustomize --load-restrictor='LoadRestrictionsNone' hack/distro/monitoring/dashboards | kubectl apply -f -
@$(MAKE) wait-for-helmreleases
@printf "\n\033[32mAccess Grafana:\033[0m\n\n"
@printf " \033[1mkubectl port-forward svc/kube-prometheus-stack-grafana 9090:80 -n monitoring-system\033[0m\n\n"
dev-setup-monitoring: setup-monitoring
@$(KUBECTL) kustomize --load-restrictor='LoadRestrictionsNone' hack/distro/host-proxy | envsubst | kubectl apply -f -
dev-setup-argocd: dev-setup-fluxcd
@$(KUBECTL) kustomize --load-restrictor='LoadRestrictionsNone' hack/distro/argocd | envsubst | kubectl apply -f -
@$(MAKE) wait-for-helmreleases
@$(KUBECTL) kustomize --load-restrictor='LoadRestrictionsNone' hack/distro/argocd/application | envsubst | kubectl apply -f -
@printf "\n\033[32mAccess ArgoCD:\033[0m\n\n"
@printf " \033[1mkubectl get secret -n argocd argocd-initial-admin-secret -o jsonpath='{.data.password}' | base64 -d\033[0m\n\n"
@printf " \033[1mkubectl port-forward svc/argocd-server 9091:80 -n argocd\033[0m\n\n"
dev-setup-cert-manager:
@$(KUBECTL) kustomize --load-restrictor='LoadRestrictionsNone' hack/distro/cert-manager | envsubst | kubectl apply -f -
dev-setup-fluxcd:
@$(KUBECTL) kustomize --load-restrictor='LoadRestrictionsNone' hack/distro/fluxcd | envsubst | kubectl apply -f -
kubectl patch MutatingWebhookConfiguration capsule-mutating-webhook-configuration \
--type='json' -p="[\
{'op': 'replace', 'path': '/webhooks/0/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/defaults\",'caBundle':\"$${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/1/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/defaults\",'caBundle':\"$${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/2/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/defaults\",'caBundle':\"$${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/3/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/namespace-owner-reference\",'caBundle':\"$${CA_BUNDLE}\"}}\
]" && \
kubectl patch ValidatingWebhookConfiguration capsule-validating-webhook-configuration \
--type='json' -p="[\
{'op': 'replace', 'path': '/webhooks/0/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/cordoning\",'caBundle':\"$${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/1/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/ingresses\",'caBundle':\"$${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/2/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/namespaces\",'caBundle':\"$${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/3/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/networkpolicies\",'caBundle':\"$${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/4/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/nodes\",'caBundle':\"$${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/5/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/pods\",'caBundle':\"$${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/6/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/persistentvolumeclaims\",'caBundle':\"$${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/7/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/services\",'caBundle':\"$${CA_BUNDLE}\"}},\
{'op': 'replace', 'path': '/webhooks/8/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/tenants\",'caBundle':\"$${CA_BUNDLE}\"}}\
]" && \
kubectl patch crd tenants.capsule.clastix.io \
--type='json' -p="[\
{'op': 'replace', 'path': '/spec/conversion/webhook/clientConfig', 'value':{'url': \"$${WEBHOOK_URL}\", 'caBundle': \"$${CA_BUNDLE}\"}}\
]" && \
kubectl patch crd capsuleconfigurations.capsule.clastix.io \
--type='json' -p="[\
{'op': 'replace', 'path': '/spec/conversion/webhook/clientConfig', 'value':{'url': \"$${WEBHOOK_URL}\", 'caBundle': \"$${CA_BUNDLE}\"}}\
]";
# Here to setup the current capsule version
# Intended to test updates to new version
dev-setup-capsule: dev-setup-fluxcd
@$(KUBECTL) kustomize --load-restrictor='LoadRestrictionsNone' hack/distro/capsule | envsubst | kubectl apply -f -
@$(MAKE) wait-for-helmreleases
@$(MAKE) dev-setup-capsule-example
# Build the docker image
docker-build: test
docker build . -t ${IMG} --build-arg GIT_HEAD_COMMIT=$(GIT_HEAD_COMMIT) \
--build-arg GIT_TAG_COMMIT=$(GIT_TAG_COMMIT) \
--build-arg GIT_MODIFIED=$(GIT_MODIFIED) \
--build-arg GIT_REPO=$(GIT_REPO) \
--build-arg GIT_LAST_TAG=$(VERSION) \
--build-arg BUILD_DATE=$(BUILD_DATE)
dev-setup-capsule-example: dev-setup-fluxcd
@$(KUBECTL) kustomize --load-restrictor='LoadRestrictionsNone' hack/distro/capsule/example-setup | envsubst | kubectl apply -f -
@$(KUBECTL) create ns wind-test --as joe --as-group projectcapsule.dev || true
@$(KUBECTL) create ns wind-prod --as joe --as-group projectcapsule.dev || true
@$(KUBECTL) create ns green-test --as bob --as-group projectcapsule.dev || true
@$(KUBECTL) create ns green-prod --as bob --as-group projectcapsule.dev || true
@$(KUBECTL) create ns solar-test --as alice --as-group projectcapsule.dev || true
@$(KUBECTL) create ns solar-prod --as alice --as-group projectcapsule.dev || true
@$(KUBECTL) apply -f hack/distro/capsule/example-setup/claims.yaml
# Push the docker image
docker-push:
docker push ${IMG}
CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
controller-gen: ## Download controller-gen locally if necessary.
$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.10.0)
wait-for-helmreleases:
@ echo "Waiting for all HelmReleases to have observedGeneration >= 0..."
@while [ "$$($(KUBECTL) get helmrelease -A -o jsonpath='{range .items[?(@.status.observedGeneration<0)]}{.metadata.namespace}{" "}{.metadata.name}{"\n"}{end}' | wc -l)" -ne 0 ]; do \
sleep 5; \
done
ENTERPRISE_VERSION ?= "0.13.0-rc.2"
ENTERPRISE_REGISTRY ?= "oci.peakscale.ch"
enterprise-prerelease:
mkdir -p ./builds
$(MAKE) CAPSULE_IMG=$(ENTERPRISE_REGISTRY)/prereleases/images/capsule VERSION=$(ENTERPRISE_VERSION) ko-publish-capsule
$(HELM) package ./charts/capsule --app-version=$(ENTERPRISE_VERSION) --version=$(ENTERPRISE_VERSION) --destination ./builds/
$(HELM) push ./builds/capsule-$(ENTERPRISE_VERSION).tgz oci://$(ENTERPRISE_REGISTRY)/prereleases/charts/
$(MAKE) deploy-enterprise
rm -rf ./builds
deploy-enterprise:
@echo ""
@echo "Deploying Capsule Prerelease (Enterprise) $(ENTERPRISE_VERSION)"
@echo ""
@echo "1) Create image pull secret (Change the credentials with the ones provided to you):"
@echo ""
@echo "kubectl create secret docker-registry capsule-enterprise -n capsule-system \\"
@echo " --docker-username='robot\$$name' \\"
@echo " --docker-password='serviceaccount-password' \\"
@echo " --docker-server='$(ENTERPRISE_REGISTRY)'"
@echo ""
@echo "2) Deploy Capsule:"
@echo ""
@echo "helm upgrade --install capsule \\"
@echo " oci://$(ENTERPRISE_REGISTRY)/prereleases/charts/capsule \\"
@echo " --namespace capsule-system \\"
@echo " --version $(ENTERPRISE_VERSION) \\"
@echo " --reuse-values \\"
@echo " --set manager.image.registry=$(ENTERPRISE_REGISTRY) \\"
@echo " --set manager.image.repository=prereleases/images/capsule \\"
@echo " --set manager.image.tag=$(ENTERPRISE_VERSION) \\"
@echo " --set manager.image.pullPolicy=Always \\"
@echo " --set 'serviceAccount.imagePullSecrets={capsule-enterprise}'"
@echo ""
####################
# -- Docker
####################
KO_PLATFORM ?= linux/$(GOARCH)
KOCACHE ?= /tmp/ko-cache
KO_REGISTRY := ko.local
KO_TAGS ?= "latest"
ifdef VERSION
KO_TAGS := $(KO_TAGS),$(VERSION)
endif
LD_FLAGS := "-X main.Version=$(VERSION) \
-X main.GitCommit=$(GIT_HEAD_COMMIT) \
-X main.GitTag=$(VERSION) \
-X main.GitDirty=$(GIT_MODIFIED) \
-X main.BuildTime=$(BUILD_DATE) \
-X main.GitRepo=$(GIT_REPO)"
# Docker Image Build
# ------------------
.PHONY: ko-build-capsule
ko-build-capsule: ko
@echo Building Capsule $(KO_TAGS) for $(KO_PLATFORM) >&2
@LD_FLAGS=$(LD_FLAGS) KOCACHE=$(KOCACHE) KO_DOCKER_REPO=$(CAPSULE_IMG) \
$(KO) build ./cmd/ --bare --tags=$(KO_TAGS) --push=false --local --platform=$(KO_PLATFORM)
.PHONY: ko-build-all
ko-build-all: ko-build-capsule
.PHONY: docker-build-capsule-trace
docker-build-capsule-trace: ko-build-capsule
@docker build \
--no-cache \
--build-arg TARGET_IMAGE=$(CAPSULE_IMG):$(VERSION) \
-t $(CAPSULE_IMG):tracing \
-f Dockerfile.tracing .
# Docker Image Publish
# ------------------
REGISTRY_PASSWORD ?= dummy
REGISTRY_USERNAME ?= dummy
.PHONY: ko-login
ko-login: ko
@$(KO) login $(REGISTRY) --username $(REGISTRY_USERNAME) --password $(REGISTRY_PASSWORD)
.PHONY: ko-publish-capsule
ko-publish-capsule: ko-login ## Build and publish capsule image (with ko)
@LD_FLAGS=$(LD_FLAGS) KOCACHE=$(KOCACHE) KO_DOCKER_REPO=$(CAPSULE_IMG) \
$(KO) build ./cmd/ --bare --tags=$(KO_TAGS)
.PHONY: ko-publish-all
ko-publish-all: ko-publish-capsule
# Sorting imports
.PHONY: goimports
goimports:
goimports -w -l -local "github.com/projectcapsule/capsule" .
# Linting code as PR is expecting
.PHONY: golint
golint: golangci-lint
$(GOLANGCI_LINT) run -c .golangci.yaml --verbose
.PHONY: golint-fix
golint-fix: golangci-lint
$(GOLANGCI_LINT) run -c .golangci.yaml --verbose --fix
# Running e2e tests in a KinD instance
.PHONY: e2e
e2e: ginkgo
$(MAKE) e2e-build && $(MAKE) e2e-exec && $(MAKE) e2e-destroy
e2e-build: kind
$(MAKE) dev-build
$(MAKE) e2e-install
.PHONY: e2e-install
e2e-install: helm-controller-version ko-build-all
$(MAKE) e2e-load-image CLUSTER_NAME=$(CLUSTER_NAME) IMAGE=$(CAPSULE_IMG) VERSION=$(VERSION)
$(HELM) upgrade \
--dependency-update \
--debug \
--install \
--namespace capsule-system \
--create-namespace \
--set 'replicaCount=2'\
--set 'manager.image.pullPolicy=Never' \
--set 'manager.resources=null'\
--set "manager.image.tag=$(VERSION)" \
--set 'manager.livenessProbe.failureThreshold=10' \
--set 'webhooks.hooks.nodes.enabled=true' \
--set "webhooks.exclusive=true"\
--set "manager.options.logLevel=debug"\
capsule \
./charts/capsule
.PHONY: trace-install
trace-install:
helm upgrade \
--dependency-update \
--debug \
--install \
--namespace capsule-system \
--create-namespace \
--set 'manager.resources=null'\
--set 'manager.livenessProbe.failureThreshold=10' \
--set 'manager.readinessProbe.failureThreshold=10' \
--values charts/capsule/ci/tracing-values.yaml \
capsule \
./charts/capsule
.PHONY: trace-e2e
trace-e2e: kind
$(MAKE) docker-build-capsule-trace
$(KIND) create cluster --wait=60s --image kindest/node:$(KUBERNETES_SUPPORTED_VERSION) --config hack/kind-cluster.yml
$(MAKE) e2e-load-image CLUSTER_NAME=capsule-tracing IMAGE=$(CAPSULE_IMG) VERSION=tracing
$(MAKE) trace-install
$(MAKE) e2e-install-deps
$(MAKE) e2e-exec
$(KIND) delete cluster --name capsule-tracing
.PHONY: trace-unit
trace-unit: harpoon
$(HARPOON) analyze -e .git/ -e assets/ -e charts/ -e config/ -e docs/ -e e2e/ -e hack/ --directory /tmp/artifacts/ --save
$(HARPOON) hunt -D /tmp/results -F harpoon-report.yml --include-cmd-stdout --save
.PHONY: seccomp
seccomp:
$(HARPOON) build --add-syscall-sets=dynamic,docker -D /tmp/results --name capsule-seccomp.json --save
.PHONY: e2e-load-image
e2e-load-image: kind
$(KIND) load docker-image $(IMAGE):$(VERSION) --name $(CLUSTER_NAME)
.PHONY: e2e-exec
e2e-exec: ginkgo
$(GINKGO) -v -tags e2e ./e2e
.PHONY: e2e-destroy
e2e-destroy: dev-destroy
SPELL_CHECKER = npx spellchecker-cli
docs-lint:
cd docs/content && $(SPELL_CHECKER) -f "*.md" "*/*.md" "!general/crds-apis.md" -d dictionary.txt
####################
# -- Helpers
####################
pull-upstream:
git remote add upstream https://github.com/projectcapsule/capsule.git
git fetch --all && git pull upstream
## Location to install dependencies to
LOCALBIN ?= $(shell pwd)/bin
$(LOCALBIN):
mkdir -p $(LOCALBIN)
####################
# -- Helm Plugins
####################
HELM_SCHEMA_VERSION := ""
helm-plugin-schema:
@$(HELM) plugin install https://github.com/losisin/helm-values-schema-json.git --version $(HELM_SCHEMA_VERSION) --verify=false || true
HELM_DOCS := $(LOCALBIN)/helm-docs
HELM_DOCS_VERSION := v1.14.1
HELM_DOCS_LOOKUP := norwoodj/helm-docs
helm-doc:
@test -s $(HELM_DOCS) || \
$(call go-install-tool,$(HELM_DOCS),github.com/$(HELM_DOCS_LOOKUP)/cmd/helm-docs@$(HELM_DOCS_VERSION))
####################
# -- Tools
####################
CONTROLLER_GEN := $(LOCALBIN)/controller-gen
CONTROLLER_GEN_VERSION ?= v0.20.0
CONTROLLER_GEN_LOOKUP := kubernetes-sigs/controller-tools
controller-gen:
@test -s $(CONTROLLER_GEN) && $(CONTROLLER_GEN) --version | grep -q $(CONTROLLER_GEN_VERSION) || \
$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_GEN_VERSION))
GINKGO := $(LOCALBIN)/ginkgo
ginkgo:
$(call go-install-tool,$(GINKGO),github.com/onsi/ginkgo/v2/ginkgo)
CT := $(LOCALBIN)/ct
CT_VERSION := v3.14.0
CT_LOOKUP := helm/chart-testing
ct:
@test -s $(CT) && $(CT) version | grep -q $(CT_VERSION) || \
$(call go-install-tool,$(CT),github.com/$(CT_LOOKUP)/v3/ct@$(CT_VERSION))
KIND := $(LOCALBIN)/kind
KIND_VERSION := v0.31.0
KIND_LOOKUP := kubernetes-sigs/kind
kind:
@test -s $(KIND) && $(KIND) --version | grep -q $(KIND_VERSION) || \
$(call go-install-tool,$(KIND),sigs.k8s.io/kind/cmd/kind@$(KIND_VERSION))
KO := $(LOCALBIN)/ko
KO_VERSION := v0.18.1
KO_LOOKUP := google/ko
ko:
@test -s $(KO) && $(KO) -h | grep -q $(KO_VERSION) || \
$(call go-install-tool,$(KO),github.com/$(KO_LOOKUP)@$(KO_VERSION))
NWA := $(LOCALBIN)/nwa
NWA_VERSION := v0.7.7
NWA_LOOKUP := B1NARY-GR0UP/nwa
nwa:
@test -s $(NWA) && $(NWA) -h | grep -q $(NWA_VERSION) || \
$(call go-install-tool,$(NWA),github.com/$(NWA_LOOKUP)@$(NWA_VERSION))
GOLANGCI_LINT := $(LOCALBIN)/golangci-lint
GOLANGCI_LINT_VERSION := v2.8.0
GOLANGCI_LINT_LOOKUP := golangci/golangci-lint
golangci-lint: ## Download golangci-lint locally if necessary.
@test -s $(GOLANGCI_LINT) && $(GOLANGCI_LINT) -h | grep -q $(GOLANGCI_LINT_VERSION) || \
$(call go-install-tool,$(GOLANGCI_LINT),github.com/$(GOLANGCI_LINT_LOOKUP)/v2/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION))
APIDOCS_GEN := $(LOCALBIN)/crdoc
APIDOCS_GEN_VERSION := v0.6.4
APIDOCS_GEN_LOOKUP := fybrik/crdoc
APIDOCS_GEN = $(shell pwd)/bin/crdoc
apidocs-gen: ## Download crdoc locally if necessary.
@test -s $(APIDOCS_GEN) && $(APIDOCS_GEN) --version | grep -q $(APIDOCS_GEN_VERSION) || \
$(call go-install-tool,$(APIDOCS_GEN),fybrik.io/crdoc@$(APIDOCS_GEN_VERSION))
$(call go-install-tool,$(APIDOCS_GEN),fybrik.io/crdoc@latest)
HARPOON := $(LOCALBIN)/harpoon
HARPOON_VERSION := v0.10.2
HARPOON_LOOKUP := alegrey91/harpoon
harpoon:
@mkdir -p $(LOCALBIN)
@curl -s https://raw.githubusercontent.com/alegrey91/harpoon/main/install | \
sudo bash -s -- --install-version $(HARPOON_VERSION) --install-dir $(LOCALBIN)
GINKGO = $(shell pwd)/bin/ginkgo
ginkgo: ## Download ginkgo locally if necessary.
$(call go-install-tool,$(GINKGO),github.com/onsi/ginkgo/ginkgo@v1.16.5)
CT = $(shell pwd)/bin/ct
ct: ## Download ct locally if necessary.
$(call go-install-tool,$(CT),github.com/helm/chart-testing/v3/ct@v3.7.1)
KIND = $(shell pwd)/bin/kind
kind: ## Download kind locally if necessary.
$(call go-install-tool,$(KIND),sigs.k8s.io/kind/cmd/kind@v0.17.0)
KUSTOMIZE = $(shell pwd)/bin/kustomize
kustomize: ## Download kustomize locally if necessary.
$(call install-kustomize,$(KUSTOMIZE),3.8.7)
define install-kustomize
@[ -f $(1) ] || { \
set -e ;\
echo "Installing v$(2)" ;\
cd bin ;\
wget "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" ;\
bash ./install_kustomize.sh $(2) ;\
}
endef
# go-install-tool will 'go install' any package $2 and install it to $1.
PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
define go-install-tool
[ -f $(1) ] || { \
set -e ;\
GOBIN=$(LOCALBIN) go install $(2) ;\
@[ -f $(1) ] || { \
set -e ;\
echo "Installing $(2)" ;\
GOBIN=$(PROJECT_DIR)/bin go install $(2) ;\
}
endef
# Generate bundle manifests and metadata, then validate generated files.
bundle: manifests
operator-sdk generate kustomize manifests -q
kustomize build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS)
operator-sdk bundle validate ./bundle
# Build the bundle image.
bundle-build:
docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) .
# Sorting imports
.PHONY: goimports
goimports:
goimports -w -l -local "github.com/clastix/capsule" .
GOLANGCI_LINT = $(shell pwd)/bin/golangci-lint
golangci-lint: ## Download golangci-lint locally if necessary.
$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@v1.45.2)
# Linting code as PR is expecting
.PHONY: golint
golint: golangci-lint
$(GOLANGCI_LINT) run -c .golangci.yml
# Running e2e tests in a KinD instance
.PHONY: e2e
e2e/%: ginkgo
$(MAKE) e2e-build/$* && $(MAKE) e2e-exec || $(MAKE) e2e-destroy
e2e-build/%:
kind create cluster --wait=60s --name capsule --image=kindest/node:$*
make docker-build
kind load docker-image --nodes capsule-control-plane --name capsule $(IMG)
helm upgrade \
--debug \
--install \
--namespace capsule-system \
--create-namespace \
--set 'manager.image.pullPolicy=Never' \
--set 'manager.resources=null'\
--set "manager.image.tag=$(VERSION)" \
--set 'manager.livenessProbe.failureThreshold=10' \
--set 'manager.readinessProbe.failureThreshold=10' \
capsule \
./charts/capsule
e2e-exec: ginkgo
$(GINKGO) -v -tags e2e ./e2e
e2e-destroy:
kind delete cluster --name capsule

PROJECT
View File

@@ -1,29 +1,44 @@
# Code generated by tool. DO NOT EDIT.
# This file is used to track the info used to scaffold your project
# and allow the plugins properly work.
# More info: https://book.kubebuilder.io/reference/project-config.html
domain: clastix.io
layout:
- go.kubebuilder.io/v4
- go.kubebuilder.io/v3
plugins:
manifests.sdk.operatorframework.io/v2: {}
scorecard.sdk.operatorframework.io/v2: {}
projectName: capsule
repo: github.com/projectcapsule/capsule
repo: github.com/clastix/capsule
resources:
- api:
crdVersion: v1
controller: true
domain: clastix.io
group: capsule
kind: Tenant
path: github.com/clastix/capsule/api/v1alpha1
version: v1alpha1
webhooks:
conversion: true
webhookVersion: v1
- api:
crdVersion: v1
controller: true
domain: clastix.io
group: capsule
kind: CapsuleConfiguration
path: github.com/clastix/capsule/api/v1alpha1
version: v1alpha1
- api:
crdVersion: v1
domain: clastix.io
group: capsule
kind: Tenant
path: github.com/projectcapsule/capsule/api/v1beta1
path: github.com/clastix/capsule/api/v1beta1
version: v1beta1
- api:
crdVersion: v1
domain: clastix.io
group: capsule
kind: Tenant
path: github.com/projectcapsule/capsule/api/v1beta2
path: github.com/clastix/capsule/api/v1beta2
version: v1beta2
- api:
crdVersion: v1
@@ -31,7 +46,7 @@ resources:
domain: clastix.io
group: capsule
kind: CapsuleConfiguration
path: github.com/projectcapsule/capsule/api/v1beta2
path: github.com/clastix/capsule/api/v1beta2
version: v1beta2
- api:
crdVersion: v1
@@ -39,36 +54,13 @@ resources:
domain: clastix.io
group: capsule
kind: TenantResource
path: github.com/projectcapsule/capsule/api/v1beta2
path: github.com/clastix/capsule/api/v1beta2
version: v1beta2
- api:
crdVersion: v1
domain: clastix.io
group: capsule
kind: GlobalTenantResource
path: github.com/projectcapsule/capsule/api/v1beta2
version: v1beta2
- api:
crdVersion: v1
domain: clastix.io
group: capsule
kind: ResourcePool
path: github.com/projectcapsule/capsule/api/v1beta2
version: v1beta2
- api:
crdVersion: v1
namespaced: true
controller: true
domain: clastix.io
group: capsule
kind: ResourcePoolClaim
path: github.com/projectcapsule/capsule/api/v1beta2
version: v1beta2
- api:
crdVersion: v1
domain: clastix.io
group: capsule
kind: TenantOwner
path: github.com/projectcapsule/capsule/api/v1beta2
path: github.com/clastix/capsule/api/v1beta2
version: v1beta2
version: "3"

View File

@@ -2,28 +2,16 @@
<p align="left">
<img src="https://img.shields.io/github/license/clastix/capsule"/>
<img src="https://img.shields.io/github/go-mod/go-version/clastix/capsule"/>
<a href="https://github.com/projectcapsule/capsule/releases">
<a href="https://github.com/clastix/capsule/releases">
<img src="https://img.shields.io/github/v/release/clastix/capsule"/>
</a>
<a href="https://charmhub.io/capsule-k8s">
<img src="https://charmhub.io/capsule-k8s/badge.svg"/>
</a>
<a href="https://www.bestpractices.dev/projects/5601">
<img src="https://www.bestpractices.dev/projects/5601/badge"/>
</a>
<a href="https://api.securityscorecards.dev/projects/github.com/projectcapsule/capsule/badge">
<img src="https://api.securityscorecards.dev/projects/github.com/projectcapsule/capsule/badge"/>
</a>
<a href="https://artifacthub.io/packages/search?repo=projectcapsule">
<img src="https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/projectcapsule"/>
</a>
<a href="https://app.fossa.com/projects/git%2Bgithub.com%2Fprojectcapsule%2Fcapsule?ref=badge_shield&issueType=license" alt="FOSSA Status">
<img src="https://app.fossa.com/api/projects/git%2Bgithub.com%2Fprojectcapsule%2Fcapsule.svg?type=shield&issueType=license"/>
</a>
</p>
<p align="center">
<img src="assets/logo/capsule.svg" height=560 />
<img src="assets/logo/capsule_medium.png" />
</p>
---
@@ -40,9 +28,9 @@ Kubernetes introduces the _Namespace_ object type to create logical partitions o
# Entering Capsule
Capsule takes a different approach. In a single cluster, the Capsule Controller aggregates multiple namespaces in a lightweight abstraction called _Tenant_, basically a grouping of Kubernetes Namespaces. Within each tenant, users are free to create their namespaces and share all the assigned resources.
Capsule takes a different approach. In a single cluster, the Capsule Controller aggregates multiple namespaces in a lightweight abstraction called _Tenant_, basically a grouping of Kubernetes Namespaces. Within each tenant, users are free to create their namespaces and share all the assigned resources.
On the other side, the Capsule Policy Engine keeps the different tenants isolated from each other. _Network and Security Policies_, _Resource Quota_, _Limit Ranges_, _RBAC_, and other policies defined at the tenant level are automatically inherited by all the namespaces in the tenant. Then users are free to operate their tenants in autonomy, without the intervention of the cluster administrator.
On the other side, the Capsule Policy Engine keeps the different tenants isolated from each other. _Network and Security Policies_, _Resource Quota_, _Limit Ranges_, _RBAC_, and other policies defined at the tenant level are automatically inherited by all the namespaces in the tenant. Then users are free to operate their tenants in autonomy, without the intervention of the cluster administrator.
# Features
@@ -76,58 +64,60 @@ Assign to tenants a dedicated set of compute, storage, and network resources and
# Documentation
Please check the project [documentation](https://projectcapsule.dev) for the cool things you can do with Capsule.
Please, check the project [documentation](https://capsule.clastix.io) for the cool things you can do with Capsule.
# Contributions
Capsule is Open Source with Apache 2 license and any contribution is welcome.
## Community meeting
## Chart Development
Join the community, share and learn from it. You can find all the resources to how to contribute code and docs, connect with people in the [community repository](https://github.com/projectcapsule/capsule-community).
### Chart Linting
Please read the [code of conduct](CODE_OF_CONDUCT.md).
The chart is linted with [ct](https://github.com/helm/chart-testing). You can run the linter locally with this command:
```
make helm-lint
```
### Chart Documentation
The documentation for each chart is done with [helm-docs](https://github.com/norwoodj/helm-docs). This way we can ensure that values are consistent with the chart documentation. Run this anytime you make changes to a `values.yaml` file:
```
make helm-docs
```
## Community
Join the community, share and learn from it. You can find all the resources to how to contribute code and docs, connect with people in the [community repository](https://github.com/clastix/capsule-community).
## Adopters
See the [ADOPTERS.md](ADOPTERS.md) file for a list of companies that are using Capsule.
# Project Governance
# Governance
You can find how the Capsule project is governed [here](https://projectcapsule.dev/project/governance/).
You can find how the Capsule project is governed [here](https://capsule.clastix.io/docs/contributing/governance).
## Maintainers
Please refer to the maintainers file available [here](.github/maintainers.yaml).
## CLOMonitor
CLOMonitor is a tool that periodically checks open source project repositories to verify they meet certain project health best practices.
[![CloMonitor report summary](https://clomonitor.io/api/projects/cncf/capsule/report-summary?theme=light)](https://clomonitor.io/projects/cncf/capsule)
### Changelog
Read how we log changes [here](CHANGELOG.md)
### Software Bill of Materials
All OCI release artifacts include a Software Bill of Materials (SBOM) in CycloneDX JSON format. More information about this is available [here](SECURITY.md#software-bill-of-materials-sbom)
Please, refer to the maintainers file available [here](.github/maintainers.yaml).
# FAQ
- Q. How do you pronounce Capsule?
- Q. How to pronounce Capsule?
A. It should be pronounced as `/ˈkæpsjuːl/`.
- Q. Is it production grade?
A. Although under frequent development and improvement, Capsule is ready to be used in production environments as currently, people are using it in public and private deployments. Check out the [release](https://github.com/projectcapsule/capsule/releases) page for a detailed list of available versions.
A. Although under frequent development and improvements, Capsule is ready to be used in production environments as currently, people are using it in public and private deployments. Check out the [release](https://github.com/clastix/capsule/releases) page for a detailed list of available versions.
- Q. Does it work with my Kubernetes XYZ distribution?
A. We tested Capsule with vanilla Kubernetes 1.16+ on private environments and public clouds. We expect it to work smoothly on any other Kubernetes distribution. Please let us know if you find it doesn't.
A. We tested Capsule with vanilla Kubernetes 1.16+ on private environments and public clouds. We expect it to work smoothly on any other Kubernetes distribution. Please, let us know if you find it doesn't.
- Q. Do you provide commercial support?
A. Yes, we're available to help and provide commercial support. [Clastix](https://clastix.io) is the company behind Capsule. Please, contact us for a quote.
A. Yes, we're available to help and provide commercial support. [Clastix](https://clastix.io) is the company behind Capsule. Please, contact us for a quote.

View File

@@ -1,3 +0,0 @@
# Roadmap
Future features and fixes are planned with [release milestones on GitHub](https://github.com/projectcapsule/capsule/milestones?direction=asc&sort=due_date&state=open). You can influence the roadmap by opening issues or joining our community meetings.

View File

@@ -1,59 +0,0 @@
# Reference https://github.com/ossf/security-insights-spec/blob/v1.0.0/specification.md
header:
schema-version: 1.0.0
expiration-date: '2024-10-24T01:00:00.000Z'
last-updated: '2023-10-24'
last-reviewed: '2023-10-24'
project-url: https://github.com/projectcapsule/capsule
changelog: https://github.com/projectcapsule/capsule/blob/main/CHANGELOG.md
license: https://github.com/projectcapsule/capsule/blob/main/LICENSE
project-lifecycle:
status: active
bug-fixes-only: false
core-maintainers:
- github:prometherion
- github:oliverbaehler
- github:bsctl
- github:MaxFedotov
distribution-points:
- https://github.com/orgs/projectcapsule/packages?repo_name=capsule
contribution-policy:
accepts-pull-requests: true
accepts-automated-pull-requests: true
contributing-policy: https://github.com/projectcapsule/capsule/blob/main/CONTRIBUTING.md
code-of-conduct: https://github.com/projectcapsule/capsule/blob/main/CODE_OF_CONDUCT.md
vulnerability-reporting:
accepts-vulnerability-reports: true
security-policy: https://github.com/projectcapsule/capsule/blob/main/SECURITY.md
email-contact: cncf-capsule-maintainers@lists.cncf.io
comment: |
Report a vulnerability by using private security issues in GitHub.
security-testing:
- tool-type: sca
tool-name: Dependabot
tool-version: latest
integration:
ad-hoc: false
ci: true
before-release: true
comment: |
Dependabot is enabled for this repo.
dependencies:
third-party-packages: true
dependencies-lists:
- https://github.com/projectcapsule/capsule/blob/main/go.mod
env-dependencies-policy:
policy-url: https://github.com/projectcapsule/capsule/blob/main/DEPENDENCY.md
sbom:
- sbom-file: https://github.com/projectcapsule/capsule/pkgs/container/sbom
sbom-format: CycloneDX
sbom-url: https://github.com/projectcapsule/capsule/blob/main/SECURITY.md#software-bill-of-materials-sbom
security-artifacts:
self-assessment:
self-assessment-created: true
evidence-url:
- https://github.com/projectcapsule/capsule/blob/main/SELF_ASSESSMENT.md
security-contacts:
- type: email
value: cncf-capsule-maintainers@lists.cncf.io
primary: true

View File

@@ -1,122 +0,0 @@
# Security Policy
The Capsule community has adopted this security disclosures and response policy to ensure we responsibly handle critical issues.
## Bulletins
For information regarding the security of this project please join our [slack channel](https://kubernetes.slack.com/archives/C03GETTJQRL).
## Covered Repositories and Issues
When we say "a security vulnerability in capsule" we mean a security issue
in any repository under the [projectcapsule GitHub organization](https://github.com/projectcapsule/).
This reporting process is intended only for security issues in the capsule
project itself, and doesn't apply to applications _using_ capsule or to
issues which do not affect security.
Don't use this process if:
* You have issues with your capsule installation or configuration
* Your issue is not security related
### Explicitly Not Covered: Vulnerability Scanner Reports
We do not accept reports which amount to copy and pasted output from a vulnerability
scanning tool **unless** work has specifically been done to confirm that a vulnerability
reported by the tool _actually exists_ in capsule.
## Reporting a Vulnerability
To report a security issue or vulnerability, [submit a private vulnerability report via GitHub](https://github.com/projectcapsule/capsule/security/advisories/new) to the repository maintainers with a description of the issue, the steps you took to create the issue, affected versions, and, if known, mitigations for the issue.
Describe the issue in English, ideally with some example configuration or code which allows the issue to be reproduced. Explain why you believe this to be a security issue in capsule, if that's not obvious. Your report should contain the following:
* description of the problem
* precise and detailed steps (include screenshots)
* the affected version(s). This may also include environment relevant versions.
* any possible mitigations
If the issue is confirmed as a vulnerability, we will open a Security Advisory and acknowledge your contributions as part of it.
## Response
Response times could be affected by weekends, holidays, breaks or time zone differences. That said, the security response team will endeavour to reply as soon as possible, ideally within 5 working days.
## Security Contacts
[Maintainers](./.github/maintainers.yaml) of this project are responsible for the security of the project as outlined in this policy.
# Release Artifacts
[See all the available artifacts](https://github.com/orgs/projectcapsule/packages?repo_name=capsule)
## Verifying
To verify artifacts you need to have [cosign installed](https://github.com/sigstore/cosign#installation). This guide assumes you are using v2.x of cosign. All of the signatures are created using [keyless signing](https://docs.sigstore.dev/verifying/verify/#keyless-verification-using-openid-connect). You can set the environment variable `COSIGN_REPOSITORY` to point to this repository. For example:
```bash
# Docker Image
export COSIGN_REPOSITORY=ghcr.io/projectcapsule/capsule

# Helm Chart
export COSIGN_REPOSITORY=ghcr.io/projectcapsule/charts/capsule
```
To verify the signature of the docker image, run the following command. Replace `<release_tag>` with an [available release tag](https://github.com/projectcapsule/capsule/pkgs/container/capsule):
```bash
COSIGN_REPOSITORY=ghcr.io/projectcapsule/capsule cosign verify ghcr.io/projectcapsule/capsule:<release_tag> \
  --certificate-identity-regexp="https://github.com/projectcapsule/capsule/.github/workflows/docker-publish.yml@refs/tags/*" \
  --certificate-oidc-issuer="https://token.actions.githubusercontent.com" | jq
```
To verify the signature of the helm image, run the following command. Replace `<release_tag>` with an [available release tag](https://github.com/projectcapsule/capsule/pkgs/container/charts%2Fcapsule):
```bash
COSIGN_REPOSITORY=ghcr.io/projectcapsule/charts/capsule cosign verify ghcr.io/projectcapsule/charts/capsule:<release_tag> \
  --certificate-identity-regexp="https://github.com/projectcapsule/capsule/.github/workflows/helm-publish.yml@refs/tags/*" \
  --certificate-oidc-issuer="https://token.actions.githubusercontent.com" | jq
```
## Verifying Provenance
Capsule creates and attests to the provenance of its builds using the [SLSA standard](https://slsa.dev/spec/v0.2/provenance) and meets the [SLSA Level 3](https://slsa.dev/spec/v0.1/levels) specification. The attested provenance may be verified using the cosign tool.
Verify the provenance of the docker image. Replace `<release_tag>` with an [available release tag](https://github.com/projectcapsule/capsule/pkgs/container/capsule)
```bash
cosign verify-attestation --type slsaprovenance \
--certificate-identity-regexp="https://github.com/slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@refs/tags/*" \
--certificate-oidc-issuer="https://token.actions.githubusercontent.com" \
ghcr.io/projectcapsule/capsule:<release_tag> | jq .payload -r | base64 --decode | jq
```
Verify the provenance of the helm image. Replace `<release_tag>` with an [available release tag](https://github.com/projectcapsule/capsule/pkgs/container/charts%2Fcapsule)
```bash
cosign verify-attestation --type slsaprovenance \
--certificate-identity-regexp="https://github.com/slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@refs/tags/*" \
--certificate-oidc-issuer="https://token.actions.githubusercontent.com" \
ghcr.io/projectcapsule/charts/capsule:<release_tag> | jq .payload -r | base64 --decode | jq
```
## Software Bill of Materials (SBOM)
An SBOM (Software Bill of Materials) in CycloneDX JSON format is published for each release, including pre-releases. You can set the environment variable `COSIGN_REPOSITORY` to point to this repository. For example:
```bash
# Docker Image
export COSIGN_REPOSITORY=ghcr.io/projectcapsule/capsule

# Helm Chart
export COSIGN_REPOSITORY=ghcr.io/projectcapsule/charts/capsule
```
To inspect the SBOM of the docker image, run the following command. Replace `<release_tag>` with an [available release tag](https://github.com/projectcapsule/capsule/pkgs/container/capsule):
```bash
COSIGN_REPOSITORY=ghcr.io/projectcapsule/capsule cosign download sbom ghcr.io/projectcapsule/capsule:<release_tag>
```
To inspect the SBOM of the helm image, run the following command. Replace `<release_tag>` with an [available release tag](https://github.com/projectcapsule/capsule/pkgs/container/charts%2Fcapsule):
```bash
COSIGN_REPOSITORY=ghcr.io/projectcapsule/charts/capsule cosign download sbom ghcr.io/projectcapsule/charts/capsule:<release_tag>
```
# Credits
Our Security Policy and Workflows are based on the work of the [Kyverno](https://github.com/kyverno) and [Cert-Manager](https://github.com/cert-manager) community.

View File

@@ -1,201 +0,0 @@
# Capsule Security Self-Assessment
## Metadata
<table>
<tr>
<td>Software
</td>
<td><a href="https://github.com/projectcapsule/capsule">https://github.com/projectcapsule/capsule</a>
</td>
</tr>
<tr>
<td>Website
</td>
<td><a href="https://capsule.clastix.io/">https://capsule.clastix.io/</a>
</td>
</tr>
<tr>
<td>Security Provider
</td>
<td>No
</td>
</tr>
<tr>
<td>Languages
</td>
<td>Golang
</td>
</tr>
<tr>
<td>SBOM
</td>
<td><a href="https://github.com/projectcapsule/capsule/pkgs/container/sbom">https://github.com/projectcapsule/capsule/pkgs/container/sbom</a>
</td>
</tr>
</table>
## Security Links
<table>
<tr>
<td><strong>Doc</strong>
</td>
<td><strong>URL</strong>
</td>
</tr>
<tr>
<td>Security file
</td>
<td><a href="https://github.com/projectcapsule/capsule/blob/main/SECURITY.md">https://github.com/projectcapsule/capsule/blob/main/SECURITY.md</a>
</td>
</tr>
<tr>
<td>Default and optional configs
</td>
<td><a href="https://github.com/projectcapsule/capsule/blob/main/charts/capsule/values.yaml">https://github.com/projectcapsule/capsule/blob/main/charts/capsule/values.yaml</a>
</td>
</tr>
</table>
## Overview
Capsule implements a multi-tenant and policy-based environment in your Kubernetes cluster.
It is designed as a micro-services-based ecosystem with a minimalist approach, leveraging only upstream Kubernetes.
### Background
Capsule takes a different approach than the common cluster-per-tenant model.
In a single cluster, the Capsule Controller aggregates multiple namespaces in a lightweight abstraction called Tenant, basically a grouping of Kubernetes Namespaces.
Within each tenant, users are free to create their namespaces and share all the assigned resources.
On the other side, the Capsule Policy Engine keeps the different tenants isolated from each other.
Network and Security Policies, Resource Quota, Limit Ranges, RBAC, and other policies defined at the tenant level are automatically inherited by all the namespaces in the tenant.
Then users are free to operate their tenants in autonomy, without the intervention of the cluster administrator.
Capsule was accepted as a CNCF sandbox project in December 2022.
## Actors
### Capsule Operator
It's the Operator which provides all the multi-tenant capabilities offered by Capsule.
It's made of two internal components: the webhook server (known as the _policy engine_) and the _tenant controller_.
**Capsule Tenant Controller**
The controller is responsible for managing the tenants by reconciling the required objects at the Namespace level, such as _Network Policy_, _LimitRange_, _ResourceQuota_, _Role Binding_, as well as labelling the Namespace objects belonging to a Tenant according to their desired metadata.
It is responsible for binding Namespaces to the selected Tenant, and managing their lifecycle.
Furthermore, the manager can replicate objects thanks to the **Tenant Resource** API, which offers two levels of interactions: a cluster-scoped one thanks to the `GlobalTenantResource` API, and a namespace-scoped one named `TenantResource`.
The replicated resources are dynamically created and kept in sync by Capsule itself, which also protects these objects from deletion by the Tenant owner.
**Capsule Tenant Controller (Policy Engine)**
Policies are defined on a Tenant basis: therefore, the policy engine enforces these policies on each tenant's Namespaces and the resources within them.
The Policy Engine is currently not a dedicated component, but a part of the Capsule Tenant Controller.
The webhook server, also known as the policy engine, interprets the Tenant rules and takes full advantage of the dynamic admission controllers offered by Kubernetes itself (such as `ValidatingWebhookConfiguration` and `MutatingWebhookConfiguration`).
Thanks to the _policy engine_, cluster administrators can enforce specific rules, such as preventing _Pod_ objects with images from untrusted registries from running, or preventing the creation of _PersistentVolumeClaim_ resources using a non-allowed _StorageClass_.
It also acts as a defaulter webhook, offloading the need to specify some classes (IngressClass, StorageClass, RuntimeClass, etc.).
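For illustration, consider a tenant whose `containerRegistries` allow-list permits only `ghcr.io` (the namespace name below is hypothetical); a request for a Pod pulling from another registry is expected to be denied by the validating webhook:
```bash
# Hypothetical: namespace "oil-dev" belongs to a tenant whose
# containerRegistries allow-list only permits ghcr.io.
# This request is expected to be rejected by Capsule's validating webhook.
kubectl run nginx --image=docker.io/library/nginx:latest --namespace oil-dev
```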
### Capsule Proxy
The `capsule-proxy` is an add-on offering a Kubernetes API Server shim that is aware of the multi-tenancy levels implemented by Capsule.
It's essentially a reverse proxy that decorates the incoming requests with the required `labelSelector` query string parameters to filter out some objects, such as:
- Namespaces
- IngressClass
- StorageClass
- PriorityClass
- RuntimeClass
- PersistentVolumes
Permissions on those resources are not enforced through the classic Kubernetes RBAC but rather at the Tenant Owner level, allowing fine-grained control over specific resources.
`capsule-proxy` does not serve Kubernetes API server responses itself; rather, it acts as an intermediate proxy offering dynamic filtering of requests. This means the resulting responses from the upstream are not mangled, no additional plugins or third-party binaries are required, and it integrates with external components such as the Kubernetes Dashboard and the `kubectl` binary.
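Conceptually, the decoration works as in the sketch below, assuming the tenant label that Capsule applies to Namespace objects (the tenant name `oil` is hypothetical):
```bash
# What a tenant owner runs against capsule-proxy:
kubectl get namespaces

# Roughly what capsule-proxy forwards to the upstream API server:
kubectl get namespaces -l capsule.clastix.io/tenant=oil
```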
## Actions
Tenants are created by cluster administrators, who have the right to create Tenant custom resource instances.
End users should not manage tenants.
Therefore, users without any cluster administration rights can't list or create tenants.
## Creating namespaces in a tenant
When a namespace is created, the Capsule controller inspects the groups supplied by the user and matches them against the groups defined in the `CapsuleConfiguration`. If at least one matching group is found, the request is processed by Capsule; if not, Capsule ignores the request and performs no action. This also applies to modifications (`UPDATE` requests).
To create namespaces within a tenant, a User, Group, or ServiceAccount must be configured as an owner of the tenant.
A namespace is assigned to a tenant based on its label, its owner (if the owner owns exactly one tenant), or the namespace name prefix (which matches the tenant name).
If the request is admitted, the namespace is created with all the relevant configuration from the tenant. The additional resources (NetworkPolicy, ResourceQuota, LimitRange) are created in the new namespace.
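A minimal sketch of this flow from a tenant owner's point of view (tenant and namespace names are hypothetical, and the `oil-` prefix assumes `forceTenantPrefix` is enabled):
```bash
# As the owner of the hypothetical tenant "oil", create a namespace.
kubectl create namespace oil-development

# Capsule reconciles the tenant-level policies into the new namespace.
kubectl get networkpolicy,resourcequota,limitrange --namespace oil-development
```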
### Applying Workload and configs to namespaces within a tenant
Whenever a tenant user applies new workloads or configs to a namespace, the Capsule controller inspects the namespace and checks whether it belongs to a tenant. If so, the Capsule controller applies policies from the tenant configuration to the given workloads and configs.
If there are defaults defined on the tenant, they are applied to the workloads as well.
This is a further abstraction from cluster defaults (e.g. a default `StorageClass`) to tenant defaults (e.g. a default `StorageClass` per tenant).
### Goals
**General**
* **Multitenancy**: Capsule should be able to support multiple tenants in a single Kubernetes cluster without introducing overhead or cognitive load, relying solely on Namespace objects.
* **Kubernetes agnostic**: Capsule should integrate with Kubernetes primitives, such as _RBAC_, _NetworkPolicy_, _LimitRange_, and _ResourceQuota_.
* **Policy-based**: Capsule should be able to enforce policies on tenants, which are defined on a tenant basis.
* **Native User Experience**: Capsule shouldn't increase the cognitive load of developers, for example by introducing `kubectl` plugins or forcing the tenant owners to operate their tenant objects using Custom Resource Definitions.
### Non-Goals
**General**
* **Control Plane**: Capsule does not aim to give each tenant the feeling of a dedicated control plane.
* **Custom Resource Definitions**: Capsule doesn't aim to provide virtual cluster capabilities and sticks to the native Kubernetes user experience and design; rather, its focus is to provide a governance solution centered on resource optimization and security lockdown.
## Self-assessment use
This self-assessment is created by the Capsule team to perform an internal analysis of the project's security.
It is not intended to provide a security audit of Capsule, or function as an independent assessment or attestation of Capsule's security health.
This document serves to provide Capsule users with an initial understanding of Capsule's security, where to find existing security documentation, Capsule's plans for security, and a general overview of Capsule's security practices, covering both the development of Capsule and the security of Capsule itself.
This document provides the CNCF TAG-Security with an initial understanding of Capsule to assist in a joint review, necessary for projects under incubation. Taken together, this document and the joint review serve as a cornerstone for if and when Capsule seeks graduation.
## Security functions and features
See [Actors](#actors) and [Actions](#actions) for a more detailed description of the critical actors, actions, and potential threats.
## Project compliance
As of now, not applicable.
## Secure development practices
The Capsule project follows established CNCF and OSS best practices for code development and delivery.
Capsule follows [OpenSSF Best Practices](https://www.bestpractices.dev/en/projects/5601).
Although not perfect yet, we are constantly trying to improve and achieve optimal scores.
We will assess the issues during our community meetings and try to plan them for future releases.
### Development Pipeline
Changes must be reviewed and merged by the project maintainers.
Before changes are merged, they must pass static checks, license checks, `gofmt`, `go lint`, and `go vet` verification, as well as all unit tests and e2e tests.
The Docker images are scanned with Trivy.
We run E2E tests for different Kubernetes versions on Pull Requests.
Code changes are submitted via Pull Requests (PRs) and must be signed and verified.
Direct commits to the main branch are not allowed.
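As a sketch, a commit satisfying these requirements would typically carry a DCO sign-off and a cryptographic signature (the message below is hypothetical, and whether `-S` uses GPG or SSH depends on the contributor's git configuration):
```bash
# -s adds a Signed-off-by trailer (DCO); -S signs the commit.
git commit -s -S -m "fix(webhook): reject tenants with duplicated owners"
```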
## Security issue resolution
Capsule project vulnerability handling related processes are recorded in the [Capsule Security Doc](https://github.com/projectcapsule/capsule/blob/main/SECURITY.md).
Related security vulnerabilities can be reported and communicated via email to [cncf-capsule-maintainers@lists.cncf.io](mailto:cncf-capsule-maintainers@lists.cncf.io).
## Appendix
All Capsule security-related issues (both fixes and enhancements) are labelled with "security" and can be queried using [https://github.com/projectcapsule/capsule/labels/security](https://github.com/projectcapsule/capsule/labels/security).
The code review process requires maintainers to consider security while reviewing designs and pull requests.

View File

@@ -0,0 +1,9 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
type AdditionalMetadata struct {
Labels map[string]string `json:"additionalLabels,omitempty"`
Annotations map[string]string `json:"additionalAnnotations,omitempty"`
}

View File

@@ -0,0 +1,15 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
const (
ForbiddenNodeLabelsAnnotation = "capsule.clastix.io/forbidden-node-labels"
ForbiddenNodeLabelsRegexpAnnotation = "capsule.clastix.io/forbidden-node-labels-regexp"
ForbiddenNodeAnnotationsAnnotation = "capsule.clastix.io/forbidden-node-annotations"
ForbiddenNodeAnnotationsRegexpAnnotation = "capsule.clastix.io/forbidden-node-annotations-regexp"
TLSSecretNameAnnotation = "capsule.clastix.io/tls-secret-name"
MutatingWebhookConfigurationName = "capsule.clastix.io/mutating-webhook-configuration-name"
ValidatingWebhookConfigurationName = "capsule.clastix.io/validating-webhook-configuration-name"
EnableTLSConfigurationAnnotationName = "capsule.clastix.io/enable-tls-configuration"
)

View File

@@ -0,0 +1,47 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// CapsuleConfigurationSpec defines the Capsule configuration.
type CapsuleConfigurationSpec struct {
// Names of the groups for Capsule users.
// +kubebuilder:default={capsule.clastix.io}
UserGroups []string `json:"userGroups,omitempty"`
// Enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix,
// separated by a dash. This is useful to avoid Namespace name collision in a public CaaS environment.
// +kubebuilder:default=false
ForceTenantPrefix bool `json:"forceTenantPrefix,omitempty"`
// Disallow creation of namespaces, whose name matches this regexp
ProtectedNamespaceRegexpString string `json:"protectedNamespaceRegex,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:scope=Cluster
// CapsuleConfiguration is the Schema for the Capsule configuration API.
type CapsuleConfiguration struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec CapsuleConfigurationSpec `json:"spec,omitempty"`
}
func (in *CapsuleConfiguration) Hub() {}
// +kubebuilder:object:root=true
// CapsuleConfigurationList contains a list of CapsuleConfiguration.
type CapsuleConfigurationList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []CapsuleConfiguration `json:"items"`
}
func init() {
SchemeBuilder.Register(&CapsuleConfiguration{}, &CapsuleConfigurationList{})
}

View File

@@ -0,0 +1,21 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
import (
"os"
ctrl "sigs.k8s.io/controller-runtime"
)
func (in *CapsuleConfiguration) SetupWebhookWithManager(mgr ctrl.Manager) error {
certData, _ := os.ReadFile("/tmp/k8s-webhook-server/serving-certs/tls.crt")
if len(certData) == 0 {
return nil
}
return ctrl.NewWebhookManagedBy(mgr).
For(in).
Complete()
}

View File

@@ -0,0 +1,583 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
import (
"fmt"
"reflect"
"strconv"
"strings"
"github.com/pkg/errors"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/conversion"
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
"github.com/clastix/capsule/pkg/api"
)
const (
resourceQuotaScopeAnnotation = "capsule.clastix.io/resource-quota-scope"
podAllowedImagePullPolicyAnnotation = "capsule.clastix.io/allowed-image-pull-policy"
podPriorityAllowedAnnotation = "priorityclass.capsule.clastix.io/allowed"
podPriorityAllowedRegexAnnotation = "priorityclass.capsule.clastix.io/allowed-regex"
enableNodePortsAnnotation = "capsule.clastix.io/enable-node-ports"
enableExternalNameAnnotation = "capsule.clastix.io/enable-external-name"
enableLoadBalancerAnnotation = "capsule.clastix.io/enable-loadbalancer-service"
ownerGroupsAnnotation = "owners.capsule.clastix.io/group"
ownerUsersAnnotation = "owners.capsule.clastix.io/user"
ownerServiceAccountAnnotation = "owners.capsule.clastix.io/serviceaccount"
enableNodeListingAnnotation = "capsule.clastix.io/enable-node-listing"
enableNodeUpdateAnnotation = "capsule.clastix.io/enable-node-update"
enableNodeDeletionAnnotation = "capsule.clastix.io/enable-node-deletion"
enableStorageClassListingAnnotation = "capsule.clastix.io/enable-storageclass-listing"
enableStorageClassUpdateAnnotation = "capsule.clastix.io/enable-storageclass-update"
enableStorageClassDeletionAnnotation = "capsule.clastix.io/enable-storageclass-deletion"
enableIngressClassListingAnnotation = "capsule.clastix.io/enable-ingressclass-listing"
enableIngressClassUpdateAnnotation = "capsule.clastix.io/enable-ingressclass-update"
enableIngressClassDeletionAnnotation = "capsule.clastix.io/enable-ingressclass-deletion"
enablePriorityClassListingAnnotation = "capsule.clastix.io/enable-priorityclass-listing"
enablePriorityClassUpdateAnnotation = "capsule.clastix.io/enable-priorityclass-update"
enablePriorityClassDeletionAnnotation = "capsule.clastix.io/enable-priorityclass-deletion"
ingressHostnameCollisionScope = "ingress.capsule.clastix.io/hostname-collision-scope"
)
func (in *Tenant) convertV1Alpha1OwnerToV1Beta1() capsulev1beta1.OwnerListSpec {
serviceKindToAnnotationMap := map[capsulev1beta1.ProxyServiceKind][]string{
capsulev1beta1.NodesProxy: {enableNodeListingAnnotation, enableNodeUpdateAnnotation, enableNodeDeletionAnnotation},
capsulev1beta1.StorageClassesProxy: {enableStorageClassListingAnnotation, enableStorageClassUpdateAnnotation, enableStorageClassDeletionAnnotation},
capsulev1beta1.IngressClassesProxy: {enableIngressClassListingAnnotation, enableIngressClassUpdateAnnotation, enableIngressClassDeletionAnnotation},
capsulev1beta1.PriorityClassesProxy: {enablePriorityClassListingAnnotation, enablePriorityClassUpdateAnnotation, enablePriorityClassDeletionAnnotation},
}
annotationToOperationMap := map[string]capsulev1beta1.ProxyOperation{
enableNodeListingAnnotation: capsulev1beta1.ListOperation,
enableNodeUpdateAnnotation: capsulev1beta1.UpdateOperation,
enableNodeDeletionAnnotation: capsulev1beta1.DeleteOperation,
enableStorageClassListingAnnotation: capsulev1beta1.ListOperation,
enableStorageClassUpdateAnnotation: capsulev1beta1.UpdateOperation,
enableStorageClassDeletionAnnotation: capsulev1beta1.DeleteOperation,
enableIngressClassListingAnnotation: capsulev1beta1.ListOperation,
enableIngressClassUpdateAnnotation: capsulev1beta1.UpdateOperation,
enableIngressClassDeletionAnnotation: capsulev1beta1.DeleteOperation,
enablePriorityClassListingAnnotation: capsulev1beta1.ListOperation,
enablePriorityClassUpdateAnnotation: capsulev1beta1.UpdateOperation,
enablePriorityClassDeletionAnnotation: capsulev1beta1.DeleteOperation,
}
annotationToOwnerKindMap := map[string]capsulev1beta1.OwnerKind{
ownerUsersAnnotation: capsulev1beta1.UserOwner,
ownerGroupsAnnotation: capsulev1beta1.GroupOwner,
ownerServiceAccountAnnotation: capsulev1beta1.ServiceAccountOwner,
}
annotations := in.GetAnnotations()
operations := make(map[string]map[capsulev1beta1.ProxyServiceKind][]capsulev1beta1.ProxyOperation)
for serviceKind, operationAnnotations := range serviceKindToAnnotationMap {
for _, operationAnnotation := range operationAnnotations {
val, ok := annotations[operationAnnotation]
if ok {
for _, owner := range strings.Split(val, ",") {
if _, exists := operations[owner]; !exists {
operations[owner] = make(map[capsulev1beta1.ProxyServiceKind][]capsulev1beta1.ProxyOperation)
}
operations[owner][serviceKind] = append(operations[owner][serviceKind], annotationToOperationMap[operationAnnotation])
}
}
}
}
var owners capsulev1beta1.OwnerListSpec
getProxySettingsForOwner := func(ownerName string) (settings []capsulev1beta1.ProxySettings) {
ownerOperations, ok := operations[ownerName]
if ok {
for k, v := range ownerOperations {
settings = append(settings, capsulev1beta1.ProxySettings{
Kind: k,
Operations: v,
})
}
}
return
}
owners = append(owners, capsulev1beta1.OwnerSpec{
Kind: capsulev1beta1.OwnerKind(in.Spec.Owner.Kind),
Name: in.Spec.Owner.Name,
ProxyOperations: getProxySettingsForOwner(in.Spec.Owner.Name),
})
for ownerAnnotation, ownerKind := range annotationToOwnerKindMap {
val, ok := annotations[ownerAnnotation]
if ok {
for _, owner := range strings.Split(val, ",") {
owners = append(owners, capsulev1beta1.OwnerSpec{
Kind: ownerKind,
Name: owner,
ProxyOperations: getProxySettingsForOwner(owner),
})
}
}
}
return owners
}
// nolint:gocognit,gocyclo,cyclop,maintidx
func (in *Tenant) ConvertTo(dstRaw conversion.Hub) error {
dst, ok := dstRaw.(*capsulev1beta1.Tenant)
if !ok {
return fmt.Errorf("expected type *capsulev1beta1.Tenant, got %T", dstRaw)
}
annotations := in.GetAnnotations()
// ObjectMeta
dst.ObjectMeta = in.ObjectMeta
// Spec
if in.Spec.NamespaceQuota != nil {
if dst.Spec.NamespaceOptions == nil {
dst.Spec.NamespaceOptions = &capsulev1beta1.NamespaceOptions{}
}
dst.Spec.NamespaceOptions.Quota = in.Spec.NamespaceQuota
}
dst.Spec.NodeSelector = in.Spec.NodeSelector
dst.Spec.Owners = in.convertV1Alpha1OwnerToV1Beta1()
if in.Spec.NamespacesMetadata != nil {
if dst.Spec.NamespaceOptions == nil {
dst.Spec.NamespaceOptions = &capsulev1beta1.NamespaceOptions{}
}
dst.Spec.NamespaceOptions.AdditionalMetadata = &api.AdditionalMetadataSpec{
Labels: in.Spec.NamespacesMetadata.Labels,
Annotations: in.Spec.NamespacesMetadata.Annotations,
}
}
if in.Spec.ServicesMetadata != nil {
if dst.Spec.ServiceOptions == nil {
dst.Spec.ServiceOptions = &api.ServiceOptions{}
}
dst.Spec.ServiceOptions.AdditionalMetadata = &api.AdditionalMetadataSpec{
Labels: in.Spec.ServicesMetadata.Labels,
Annotations: in.Spec.ServicesMetadata.Annotations,
}
}
if in.Spec.StorageClasses != nil {
dst.Spec.StorageClasses = in.Spec.StorageClasses
}
if v, annotationOk := in.Annotations[ingressHostnameCollisionScope]; annotationOk {
switch v {
case string(api.HostnameCollisionScopeCluster), string(api.HostnameCollisionScopeTenant), string(api.HostnameCollisionScopeNamespace):
dst.Spec.IngressOptions.HostnameCollisionScope = api.HostnameCollisionScope(v)
default:
dst.Spec.IngressOptions.HostnameCollisionScope = api.HostnameCollisionScopeDisabled
}
}
if in.Spec.IngressClasses != nil {
dst.Spec.IngressOptions.AllowedClasses = &api.AllowedListSpec{
Exact: in.Spec.IngressClasses.Exact,
Regex: in.Spec.IngressClasses.Regex,
}
}
if in.Spec.IngressHostnames != nil {
dst.Spec.IngressOptions.AllowedHostnames = &api.AllowedListSpec{
Exact: in.Spec.IngressHostnames.Exact,
Regex: in.Spec.IngressHostnames.Regex,
}
}
if in.Spec.ContainerRegistries != nil {
dst.Spec.ContainerRegistries = &api.AllowedListSpec{
Exact: in.Spec.ContainerRegistries.Exact,
Regex: in.Spec.ContainerRegistries.Regex,
}
}
if len(in.Spec.NetworkPolicies) > 0 {
dst.Spec.NetworkPolicies = api.NetworkPolicySpec{
Items: in.Spec.NetworkPolicies,
}
}
if len(in.Spec.LimitRanges) > 0 {
dst.Spec.LimitRanges = api.LimitRangesSpec{
Items: in.Spec.LimitRanges,
}
}
if len(in.Spec.ResourceQuota) > 0 {
dst.Spec.ResourceQuota = api.ResourceQuotaSpec{
Scope: func() api.ResourceQuotaScope {
if v, annotationOk := in.GetAnnotations()[resourceQuotaScopeAnnotation]; annotationOk {
switch v {
case string(api.ResourceQuotaScopeNamespace):
return api.ResourceQuotaScopeNamespace
case string(api.ResourceQuotaScopeTenant):
return api.ResourceQuotaScopeTenant
}
}
return api.ResourceQuotaScopeTenant
}(),
Items: in.Spec.ResourceQuota,
}
}
dst.Spec.AdditionalRoleBindings = in.Spec.AdditionalRoleBindings
if in.Spec.ExternalServiceIPs != nil {
if dst.Spec.ServiceOptions == nil {
dst.Spec.ServiceOptions = &api.ServiceOptions{}
}
dst.Spec.ServiceOptions.ExternalServiceIPs = in.Spec.ExternalServiceIPs
}
pullPolicies, ok := annotations[podAllowedImagePullPolicyAnnotation]
if ok {
for _, policy := range strings.Split(pullPolicies, ",") {
dst.Spec.ImagePullPolicies = append(dst.Spec.ImagePullPolicies, api.ImagePullPolicySpec(policy))
}
}
priorityClasses := api.AllowedListSpec{}
priorityClassAllowed, ok := annotations[podPriorityAllowedAnnotation]
if ok {
priorityClasses.Exact = strings.Split(priorityClassAllowed, ",")
}
priorityClassesRegexp, ok := annotations[podPriorityAllowedRegexAnnotation]
if ok {
priorityClasses.Regex = priorityClassesRegexp
}
if !reflect.ValueOf(priorityClasses).IsZero() {
dst.Spec.PriorityClasses = &priorityClasses
}
enableNodePorts, ok := annotations[enableNodePortsAnnotation]
if ok {
val, err := strconv.ParseBool(enableNodePorts)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("unable to parse %s annotation on tenant %s", enableNodePortsAnnotation, in.GetName()))
}
if dst.Spec.ServiceOptions == nil {
dst.Spec.ServiceOptions = &api.ServiceOptions{}
}
if dst.Spec.ServiceOptions.AllowedServices == nil {
dst.Spec.ServiceOptions.AllowedServices = &api.AllowedServices{}
}
dst.Spec.ServiceOptions.AllowedServices.NodePort = pointer.BoolPtr(val)
}
enableExternalName, ok := annotations[enableExternalNameAnnotation]
if ok {
val, err := strconv.ParseBool(enableExternalName)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("unable to parse %s annotation on tenant %s", enableExternalNameAnnotation, in.GetName()))
}
if dst.Spec.ServiceOptions == nil {
dst.Spec.ServiceOptions = &api.ServiceOptions{}
}
if dst.Spec.ServiceOptions.AllowedServices == nil {
dst.Spec.ServiceOptions.AllowedServices = &api.AllowedServices{}
}
dst.Spec.ServiceOptions.AllowedServices.ExternalName = pointer.BoolPtr(val)
}
loadBalancerService, ok := annotations[enableLoadBalancerAnnotation]
if ok {
val, err := strconv.ParseBool(loadBalancerService)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("unable to parse %s annotation on tenant %s", enableLoadBalancerAnnotation, in.GetName()))
}
if dst.Spec.ServiceOptions == nil {
dst.Spec.ServiceOptions = &api.ServiceOptions{}
}
if dst.Spec.ServiceOptions.AllowedServices == nil {
dst.Spec.ServiceOptions.AllowedServices = &api.AllowedServices{}
}
dst.Spec.ServiceOptions.AllowedServices.LoadBalancer = pointer.BoolPtr(val)
}
// Status
dst.Status = capsulev1beta1.TenantStatus{
Size: in.Status.Size,
Namespaces: in.Status.Namespaces,
}
// Remove unneeded annotations
delete(dst.ObjectMeta.Annotations, podAllowedImagePullPolicyAnnotation)
delete(dst.ObjectMeta.Annotations, podPriorityAllowedAnnotation)
delete(dst.ObjectMeta.Annotations, podPriorityAllowedRegexAnnotation)
delete(dst.ObjectMeta.Annotations, enableNodePortsAnnotation)
delete(dst.ObjectMeta.Annotations, enableExternalNameAnnotation)
delete(dst.ObjectMeta.Annotations, enableLoadBalancerAnnotation)
delete(dst.ObjectMeta.Annotations, ownerGroupsAnnotation)
delete(dst.ObjectMeta.Annotations, ownerUsersAnnotation)
delete(dst.ObjectMeta.Annotations, ownerServiceAccountAnnotation)
delete(dst.ObjectMeta.Annotations, enableNodeListingAnnotation)
delete(dst.ObjectMeta.Annotations, enableNodeUpdateAnnotation)
delete(dst.ObjectMeta.Annotations, enableNodeDeletionAnnotation)
delete(dst.ObjectMeta.Annotations, enableStorageClassListingAnnotation)
delete(dst.ObjectMeta.Annotations, enableStorageClassUpdateAnnotation)
delete(dst.ObjectMeta.Annotations, enableStorageClassDeletionAnnotation)
delete(dst.ObjectMeta.Annotations, enableIngressClassListingAnnotation)
delete(dst.ObjectMeta.Annotations, enableIngressClassUpdateAnnotation)
delete(dst.ObjectMeta.Annotations, enableIngressClassDeletionAnnotation)
delete(dst.ObjectMeta.Annotations, enablePriorityClassListingAnnotation)
delete(dst.ObjectMeta.Annotations, enablePriorityClassUpdateAnnotation)
delete(dst.ObjectMeta.Annotations, enablePriorityClassDeletionAnnotation)
delete(dst.ObjectMeta.Annotations, resourceQuotaScopeAnnotation)
delete(dst.ObjectMeta.Annotations, ingressHostnameCollisionScope)
return nil
}
// nolint:gocognit,gocyclo,cyclop
func (in *Tenant) convertV1Beta1OwnerToV1Alpha1(src *capsulev1beta1.Tenant) {
ownersAnnotations := map[string][]string{
ownerGroupsAnnotation: nil,
ownerUsersAnnotation: nil,
ownerServiceAccountAnnotation: nil,
}
proxyAnnotations := map[string][]string{
enableNodeListingAnnotation: nil,
enableNodeUpdateAnnotation: nil,
enableNodeDeletionAnnotation: nil,
enableStorageClassListingAnnotation: nil,
enableStorageClassUpdateAnnotation: nil,
enableStorageClassDeletionAnnotation: nil,
enableIngressClassListingAnnotation: nil,
enableIngressClassUpdateAnnotation: nil,
enableIngressClassDeletionAnnotation: nil,
}
for i, owner := range src.Spec.Owners {
if i == 0 {
in.Spec.Owner = OwnerSpec{
Name: owner.Name,
Kind: Kind(owner.Kind),
}
} else {
switch owner.Kind {
case capsulev1beta1.UserOwner:
ownersAnnotations[ownerUsersAnnotation] = append(ownersAnnotations[ownerUsersAnnotation], owner.Name)
case capsulev1beta1.GroupOwner:
ownersAnnotations[ownerGroupsAnnotation] = append(ownersAnnotations[ownerGroupsAnnotation], owner.Name)
case capsulev1beta1.ServiceAccountOwner:
ownersAnnotations[ownerServiceAccountAnnotation] = append(ownersAnnotations[ownerServiceAccountAnnotation], owner.Name)
}
}
for _, setting := range owner.ProxyOperations {
switch setting.Kind {
case capsulev1beta1.NodesProxy:
for _, operation := range setting.Operations {
switch operation {
case capsulev1beta1.ListOperation:
proxyAnnotations[enableNodeListingAnnotation] = append(proxyAnnotations[enableNodeListingAnnotation], owner.Name)
case capsulev1beta1.UpdateOperation:
proxyAnnotations[enableNodeUpdateAnnotation] = append(proxyAnnotations[enableNodeUpdateAnnotation], owner.Name)
case capsulev1beta1.DeleteOperation:
proxyAnnotations[enableNodeDeletionAnnotation] = append(proxyAnnotations[enableNodeDeletionAnnotation], owner.Name)
}
}
case capsulev1beta1.PriorityClassesProxy:
for _, operation := range setting.Operations {
switch operation {
case capsulev1beta1.ListOperation:
proxyAnnotations[enablePriorityClassListingAnnotation] = append(proxyAnnotations[enablePriorityClassListingAnnotation], owner.Name)
case capsulev1beta1.UpdateOperation:
proxyAnnotations[enablePriorityClassUpdateAnnotation] = append(proxyAnnotations[enablePriorityClassUpdateAnnotation], owner.Name)
case capsulev1beta1.DeleteOperation:
proxyAnnotations[enablePriorityClassDeletionAnnotation] = append(proxyAnnotations[enablePriorityClassDeletionAnnotation], owner.Name)
}
}
case capsulev1beta1.StorageClassesProxy:
for _, operation := range setting.Operations {
switch operation {
case capsulev1beta1.ListOperation:
proxyAnnotations[enableStorageClassListingAnnotation] = append(proxyAnnotations[enableStorageClassListingAnnotation], owner.Name)
case capsulev1beta1.UpdateOperation:
proxyAnnotations[enableStorageClassUpdateAnnotation] = append(proxyAnnotations[enableStorageClassUpdateAnnotation], owner.Name)
case capsulev1beta1.DeleteOperation:
proxyAnnotations[enableStorageClassDeletionAnnotation] = append(proxyAnnotations[enableStorageClassDeletionAnnotation], owner.Name)
}
}
case capsulev1beta1.IngressClassesProxy:
for _, operation := range setting.Operations {
switch operation {
case capsulev1beta1.ListOperation:
proxyAnnotations[enableIngressClassListingAnnotation] = append(proxyAnnotations[enableIngressClassListingAnnotation], owner.Name)
case capsulev1beta1.UpdateOperation:
proxyAnnotations[enableIngressClassUpdateAnnotation] = append(proxyAnnotations[enableIngressClassUpdateAnnotation], owner.Name)
case capsulev1beta1.DeleteOperation:
proxyAnnotations[enableIngressClassDeletionAnnotation] = append(proxyAnnotations[enableIngressClassDeletionAnnotation], owner.Name)
}
}
}
}
}
for k, v := range ownersAnnotations {
if len(v) > 0 {
in.Annotations[k] = strings.Join(v, ",")
}
}
for k, v := range proxyAnnotations {
if len(v) > 0 {
in.Annotations[k] = strings.Join(v, ",")
}
}
}
//nolint:cyclop
func (in *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
src, ok := srcRaw.(*capsulev1beta1.Tenant)
if !ok {
return fmt.Errorf("expected *capsulev1beta1.Tenant, got %T", srcRaw)
}
// ObjectMeta
in.ObjectMeta = src.ObjectMeta
// Spec
if src.Spec.NamespaceOptions != nil && src.Spec.NamespaceOptions.Quota != nil {
in.Spec.NamespaceQuota = src.Spec.NamespaceOptions.Quota
}
in.Spec.NodeSelector = src.Spec.NodeSelector
if in.Annotations == nil {
in.Annotations = make(map[string]string)
}
in.convertV1Beta1OwnerToV1Alpha1(src)
if src.Spec.NamespaceOptions != nil && src.Spec.NamespaceOptions.AdditionalMetadata != nil {
in.Spec.NamespacesMetadata = &AdditionalMetadata{
Labels: src.Spec.NamespaceOptions.AdditionalMetadata.Labels,
Annotations: src.Spec.NamespaceOptions.AdditionalMetadata.Annotations,
}
}
if src.Spec.ServiceOptions != nil && src.Spec.ServiceOptions.AdditionalMetadata != nil {
in.Spec.ServicesMetadata = &AdditionalMetadata{
Labels: src.Spec.ServiceOptions.AdditionalMetadata.Labels,
Annotations: src.Spec.ServiceOptions.AdditionalMetadata.Annotations,
}
}
if src.Spec.StorageClasses != nil {
in.Spec.StorageClasses = src.Spec.StorageClasses
}
in.Annotations[ingressHostnameCollisionScope] = string(src.Spec.IngressOptions.HostnameCollisionScope)
if src.Spec.IngressOptions.AllowedClasses != nil {
in.Spec.IngressClasses = src.Spec.IngressOptions.AllowedClasses
}
if src.Spec.IngressOptions.AllowedHostnames != nil {
in.Spec.IngressHostnames = src.Spec.IngressOptions.AllowedHostnames
}
if src.Spec.ContainerRegistries != nil {
in.Spec.ContainerRegistries = src.Spec.ContainerRegistries
}
if len(src.Spec.NetworkPolicies.Items) > 0 {
in.Spec.NetworkPolicies = src.Spec.NetworkPolicies.Items
}
if len(src.Spec.LimitRanges.Items) > 0 {
in.Spec.LimitRanges = src.Spec.LimitRanges.Items
}
if len(src.Spec.ResourceQuota.Items) > 0 {
in.Annotations[resourceQuotaScopeAnnotation] = string(src.Spec.ResourceQuota.Scope)
in.Spec.ResourceQuota = src.Spec.ResourceQuota.Items
}
in.Spec.AdditionalRoleBindings = src.Spec.AdditionalRoleBindings
if src.Spec.ServiceOptions != nil && src.Spec.ServiceOptions.ExternalServiceIPs != nil {
in.Spec.ExternalServiceIPs = src.Spec.ServiceOptions.ExternalServiceIPs
}
if len(src.Spec.ImagePullPolicies) != 0 {
var pullPolicies []string
for _, policy := range src.Spec.ImagePullPolicies {
pullPolicies = append(pullPolicies, string(policy))
}
in.Annotations[podAllowedImagePullPolicyAnnotation] = strings.Join(pullPolicies, ",")
}
if src.Spec.PriorityClasses != nil {
if len(src.Spec.PriorityClasses.Exact) != 0 {
in.Annotations[podPriorityAllowedAnnotation] = strings.Join(src.Spec.PriorityClasses.Exact, ",")
}
if src.Spec.PriorityClasses.Regex != "" {
in.Annotations[podPriorityAllowedRegexAnnotation] = src.Spec.PriorityClasses.Regex
}
}
if src.Spec.ServiceOptions != nil && src.Spec.ServiceOptions.AllowedServices != nil {
if src.Spec.ServiceOptions.AllowedServices.NodePort != nil {
in.Annotations[enableNodePortsAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.NodePort)
}
if src.Spec.ServiceOptions.AllowedServices.ExternalName != nil {
in.Annotations[enableExternalNameAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.ExternalName)
}
if src.Spec.ServiceOptions.AllowedServices.LoadBalancer != nil {
in.Annotations[enableLoadBalancerAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.LoadBalancer)
}
}
// Status
in.Status = TenantStatus{
Size: src.Status.Size,
Namespaces: src.Status.Namespaces,
}
return nil
}

View File

@@ -0,0 +1,393 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
import (
"sort"
"testing"
"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
"github.com/clastix/capsule/pkg/api"
)
// nolint:maintidx
func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
var namespaceQuota int32 = 5
nodeSelector := map[string]string{
"foo": "bar",
}
v1alpha1AdditionalMetadataSpec := &AdditionalMetadata{
Labels: map[string]string{
"foo": "bar",
},
Annotations: map[string]string{
"foo": "bar",
},
}
v1alpha1AllowedListSpec := &api.AllowedListSpec{
Exact: []string{"foo", "bar"},
Regex: "^foo*",
}
v1beta1AdditionalMetadataSpec := &api.AdditionalMetadataSpec{
Labels: map[string]string{
"foo": "bar",
},
Annotations: map[string]string{
"foo": "bar",
},
}
v1beta1NamespaceOptions := &capsulev1beta1.NamespaceOptions{
Quota: &namespaceQuota,
AdditionalMetadata: v1beta1AdditionalMetadataSpec,
}
v1beta1ServiceOptions := &api.ServiceOptions{
AdditionalMetadata: v1beta1AdditionalMetadataSpec,
AllowedServices: &api.AllowedServices{
NodePort: pointer.BoolPtr(false),
ExternalName: pointer.BoolPtr(false),
LoadBalancer: pointer.BoolPtr(false),
},
ExternalServiceIPs: &api.ExternalServiceIPsSpec{
Allowed: []api.AllowedIP{"192.168.0.1"},
},
}
v1beta2AllowedListSpec := &api.SelectorAllowedListSpec{
AllowedListSpec: api.AllowedListSpec{
Exact: []string{"foo", "bar"},
Regex: "^foo*",
},
}
networkPolicies := []networkingv1.NetworkPolicySpec{
{
Ingress: []networkingv1.NetworkPolicyIngressRule{
{
From: []networkingv1.NetworkPolicyPeer{
{
NamespaceSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"foo": "tenant-resources",
},
},
},
{
PodSelector: &metav1.LabelSelector{},
},
{
IPBlock: &networkingv1.IPBlock{
CIDR: "192.168.0.0/12",
},
},
},
},
},
},
}
limitRanges := []corev1.LimitRangeSpec{
{
Limits: []corev1.LimitRangeItem{
{
Type: corev1.LimitTypePod,
Min: map[corev1.ResourceName]resource.Quantity{
corev1.ResourceCPU: resource.MustParse("50m"),
corev1.ResourceMemory: resource.MustParse("5Mi"),
},
Max: map[corev1.ResourceName]resource.Quantity{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1Gi"),
},
},
},
},
}
resourceQuotas := []corev1.ResourceQuotaSpec{
{
Hard: map[corev1.ResourceName]resource.Quantity{
corev1.ResourceLimitsCPU: resource.MustParse("8"),
corev1.ResourceLimitsMemory: resource.MustParse("16Gi"),
corev1.ResourceRequestsCPU: resource.MustParse("8"),
corev1.ResourceRequestsMemory: resource.MustParse("16Gi"),
},
Scopes: []corev1.ResourceQuotaScope{
corev1.ResourceQuotaScopeNotTerminating,
},
},
}
v1beta1Tnt := capsulev1beta1.Tenant{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: "alice",
Labels: map[string]string{
"foo": "bar",
},
Annotations: map[string]string{
"foo": "bar",
},
},
Spec: capsulev1beta1.TenantSpec{
Owners: capsulev1beta1.OwnerListSpec{
{
Kind: "User",
Name: "alice",
ProxyOperations: []capsulev1beta1.ProxySettings{
{
Kind: "IngressClasses",
Operations: []capsulev1beta1.ProxyOperation{"List", "Update", "Delete"},
},
{
Kind: "Nodes",
Operations: []capsulev1beta1.ProxyOperation{"Update", "Delete"},
},
{
Kind: "StorageClasses",
Operations: []capsulev1beta1.ProxyOperation{"Update", "Delete"},
},
},
},
{
Kind: "User",
Name: "bob",
ProxyOperations: []capsulev1beta1.ProxySettings{
{
Kind: "IngressClasses",
Operations: []capsulev1beta1.ProxyOperation{"Update"},
},
{
Kind: "StorageClasses",
Operations: []capsulev1beta1.ProxyOperation{"List"},
},
},
},
{
Kind: "User",
Name: "jack",
ProxyOperations: []capsulev1beta1.ProxySettings{
{
Kind: "IngressClasses",
Operations: []capsulev1beta1.ProxyOperation{"Delete"},
},
{
Kind: "Nodes",
Operations: []capsulev1beta1.ProxyOperation{"Delete"},
},
{
Kind: "StorageClasses",
Operations: []capsulev1beta1.ProxyOperation{"List"},
},
{
Kind: "PriorityClasses",
Operations: []capsulev1beta1.ProxyOperation{"List"},
},
},
},
{
Kind: "Group",
Name: "owner-foo",
ProxyOperations: []capsulev1beta1.ProxySettings{
{
Kind: "IngressClasses",
Operations: []capsulev1beta1.ProxyOperation{"List"},
},
},
},
{
Kind: "Group",
Name: "owner-bar",
ProxyOperations: []capsulev1beta1.ProxySettings{
{
Kind: "IngressClasses",
Operations: []capsulev1beta1.ProxyOperation{"List"},
},
{
Kind: "StorageClasses",
Operations: []capsulev1beta1.ProxyOperation{"Delete"},
},
},
},
{
Kind: "ServiceAccount",
Name: "system:serviceaccount:oil-production:default",
ProxyOperations: []capsulev1beta1.ProxySettings{
{
Kind: "Nodes",
Operations: []capsulev1beta1.ProxyOperation{"Update"},
},
},
},
{
Kind: "ServiceAccount",
Name: "system:serviceaccount:gas-production:gas",
ProxyOperations: []capsulev1beta1.ProxySettings{
{
Kind: "StorageClasses",
Operations: []capsulev1beta1.ProxyOperation{"Update"},
},
},
},
},
NamespaceOptions: v1beta1NamespaceOptions,
ServiceOptions: v1beta1ServiceOptions,
StorageClasses: &v1beta2AllowedListSpec.AllowedListSpec,
IngressOptions: capsulev1beta1.IngressOptions{
HostnameCollisionScope: api.HostnameCollisionScopeDisabled,
AllowedClasses: &v1beta2AllowedListSpec.AllowedListSpec,
AllowedHostnames: &v1beta2AllowedListSpec.AllowedListSpec,
},
ContainerRegistries: &v1beta2AllowedListSpec.AllowedListSpec,
NodeSelector: nodeSelector,
NetworkPolicies: api.NetworkPolicySpec{
Items: networkPolicies,
},
LimitRanges: api.LimitRangesSpec{
Items: limitRanges,
},
ResourceQuota: api.ResourceQuotaSpec{
Scope: api.ResourceQuotaScopeNamespace,
Items: resourceQuotas,
},
AdditionalRoleBindings: []api.AdditionalRoleBindingsSpec{
{
ClusterRoleName: "crds-rolebinding",
Subjects: []rbacv1.Subject{
{
Kind: "Group",
APIGroup: rbacv1.GroupName,
Name: "system:authenticated",
},
},
},
},
ImagePullPolicies: []api.ImagePullPolicySpec{"Always", "IfNotPresent"},
PriorityClasses: &api.AllowedListSpec{
Exact: []string{"default"},
Regex: "^tier-.*$",
},
},
Status: capsulev1beta1.TenantStatus{
Size: 1,
Namespaces: []string{"foo", "bar"},
},
}
v1alpha1Tnt := Tenant{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: "alice",
Labels: map[string]string{
"foo": "bar",
},
Annotations: map[string]string{
"foo": "bar",
podAllowedImagePullPolicyAnnotation: "Always,IfNotPresent",
enableExternalNameAnnotation: "false",
enableNodePortsAnnotation: "false",
enableLoadBalancerAnnotation: "false",
podPriorityAllowedAnnotation: "default",
podPriorityAllowedRegexAnnotation: "^tier-.*$",
ownerGroupsAnnotation: "owner-foo,owner-bar",
ownerUsersAnnotation: "bob,jack",
ownerServiceAccountAnnotation: "system:serviceaccount:oil-production:default,system:serviceaccount:gas-production:gas",
enableNodeUpdateAnnotation: "alice,system:serviceaccount:oil-production:default",
enableNodeDeletionAnnotation: "alice,jack",
enableStorageClassListingAnnotation: "bob,jack",
enableStorageClassUpdateAnnotation: "alice,system:serviceaccount:gas-production:gas",
enableStorageClassDeletionAnnotation: "alice,owner-bar",
enableIngressClassListingAnnotation: "alice,owner-foo,owner-bar",
enableIngressClassUpdateAnnotation: "alice,bob",
enableIngressClassDeletionAnnotation: "alice,jack",
enablePriorityClassListingAnnotation: "jack",
resourceQuotaScopeAnnotation: "Namespace",
ingressHostnameCollisionScope: "Disabled",
},
},
Spec: TenantSpec{
Owner: OwnerSpec{
Name: "alice",
Kind: "User",
},
NamespaceQuota: &namespaceQuota,
NamespacesMetadata: v1alpha1AdditionalMetadataSpec,
ServicesMetadata: v1alpha1AdditionalMetadataSpec,
StorageClasses: v1alpha1AllowedListSpec,
IngressClasses: v1alpha1AllowedListSpec,
IngressHostnames: v1alpha1AllowedListSpec,
ContainerRegistries: v1alpha1AllowedListSpec,
NodeSelector: nodeSelector,
NetworkPolicies: networkPolicies,
LimitRanges: limitRanges,
ResourceQuota: resourceQuotas,
AdditionalRoleBindings: []api.AdditionalRoleBindingsSpec{
{
ClusterRoleName: "crds-rolebinding",
Subjects: []rbacv1.Subject{
{
Kind: "Group",
APIGroup: rbacv1.GroupName,
Name: "system:authenticated",
},
},
},
},
ExternalServiceIPs: &api.ExternalServiceIPsSpec{
Allowed: []api.AllowedIP{"192.168.0.1"},
},
},
Status: TenantStatus{
Size: 1,
Namespaces: []string{"foo", "bar"},
},
}
return v1alpha1Tnt, v1beta1Tnt
}
func TestConversionHub_ConvertTo(t *testing.T) {
v1beta1ConvertedTnt := capsulev1beta1.Tenant{}
v1alpha1Tnt, v1beta1tnt := generateTenantsSpecs()
err := v1alpha1Tnt.ConvertTo(&v1beta1ConvertedTnt)
if assert.NoError(t, err) {
sort.Slice(v1beta1tnt.Spec.Owners, func(i, j int) bool {
return v1beta1tnt.Spec.Owners[i].Name < v1beta1tnt.Spec.Owners[j].Name
})
sort.Slice(v1beta1ConvertedTnt.Spec.Owners, func(i, j int) bool {
return v1beta1ConvertedTnt.Spec.Owners[i].Name < v1beta1ConvertedTnt.Spec.Owners[j].Name
})
for _, owner := range v1beta1tnt.Spec.Owners {
sort.Slice(owner.ProxyOperations, func(i, j int) bool {
return owner.ProxyOperations[i].Kind < owner.ProxyOperations[j].Kind
})
}
for _, owner := range v1beta1ConvertedTnt.Spec.Owners {
sort.Slice(owner.ProxyOperations, func(i, j int) bool {
return owner.ProxyOperations[i].Kind < owner.ProxyOperations[j].Kind
})
}
assert.Equal(t, v1beta1tnt, v1beta1ConvertedTnt)
}
}
func TestConversionHub_ConvertFrom(t *testing.T) {
v1alpha1ConvertedTnt := Tenant{}
v1alpha1Tnt, v1beta1tnt := generateTenantsSpecs()
err := v1alpha1ConvertedTnt.ConvertFrom(&v1beta1tnt)
if assert.NoError(t, err) {
assert.EqualValues(t, v1alpha1Tnt, v1alpha1ConvertedTnt)
}
}

View File

@@ -0,0 +1,23 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
// Package v1alpha1 contains API Schema definitions for the capsule.clastix.io v1alpha1 API group
// +kubebuilder:object:generate=true
// +groupName=capsule.clastix.io
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects.
GroupVersion = schema.GroupVersion{Group: "capsule.clastix.io", Version: "v1alpha1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)

api/v1alpha1/owner.go Normal file
View File

@@ -0,0 +1,17 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
// OwnerSpec defines tenant owner name and kind.
type OwnerSpec struct {
Name string `json:"name"`
Kind Kind `json:"kind"`
}
// +kubebuilder:validation:Enum=User;Group
type Kind string
func (k Kind) String() string {
return string(k)
}

View File

@@ -0,0 +1,34 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
import (
"sort"
corev1 "k8s.io/api/core/v1"
)
func (in *Tenant) IsFull() bool {
// we don't have limits on assigned Namespaces
if in.Spec.NamespaceQuota == nil {
return false
}
return len(in.Status.Namespaces) >= int(*in.Spec.NamespaceQuota)
}
func (in *Tenant) AssignNamespaces(namespaces []corev1.Namespace) {
var l []string
for _, ns := range namespaces {
if ns.Status.Phase == corev1.NamespaceActive {
l = append(l, ns.GetName())
}
}
sort.Strings(l)
in.Status.Namespaces = l
in.Status.Size = uint(len(l))
}

View File

@@ -0,0 +1,71 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/clastix/capsule/pkg/api"
)
// TenantSpec defines the desired state of Tenant.
type TenantSpec struct {
Owner OwnerSpec `json:"owner"`
// +kubebuilder:validation:Minimum=1
NamespaceQuota *int32 `json:"namespaceQuota,omitempty"`
NamespacesMetadata *AdditionalMetadata `json:"namespacesMetadata,omitempty"`
ServicesMetadata *AdditionalMetadata `json:"servicesMetadata,omitempty"`
StorageClasses *api.AllowedListSpec `json:"storageClasses,omitempty"`
IngressClasses *api.AllowedListSpec `json:"ingressClasses,omitempty"`
IngressHostnames *api.AllowedListSpec `json:"ingressHostnames,omitempty"`
ContainerRegistries *api.AllowedListSpec `json:"containerRegistries,omitempty"`
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
NetworkPolicies []networkingv1.NetworkPolicySpec `json:"networkPolicies,omitempty"`
LimitRanges []corev1.LimitRangeSpec `json:"limitRanges,omitempty"`
ResourceQuota []corev1.ResourceQuotaSpec `json:"resourceQuotas,omitempty"`
AdditionalRoleBindings []api.AdditionalRoleBindingsSpec `json:"additionalRoleBindings,omitempty"`
ExternalServiceIPs *api.ExternalServiceIPsSpec `json:"externalServiceIPs,omitempty"`
}
// TenantStatus defines the observed state of Tenant.
type TenantStatus struct {
Size uint `json:"size"`
Namespaces []string `json:"namespaces,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Cluster,shortName=tnt
// +kubebuilder:printcolumn:name="Namespace quota",type="integer",JSONPath=".spec.namespaceQuota",description="The max amount of Namespaces can be created"
// +kubebuilder:printcolumn:name="Namespace count",type="integer",JSONPath=".status.size",description="The total amount of Namespaces in use"
// +kubebuilder:printcolumn:name="Owner name",type="string",JSONPath=".spec.owner.name",description="The assigned Tenant owner"
// +kubebuilder:printcolumn:name="Owner kind",type="string",JSONPath=".spec.owner.kind",description="The assigned Tenant owner kind"
// +kubebuilder:printcolumn:name="Node selector",type="string",JSONPath=".spec.nodeSelector",description="Node Selector applied to Pods"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age"
// +kubebuilder:deprecatedversion:warning="This version is going to be dropped in the upcoming version of Capsule; please, migrate to v1beta2 version."
// Tenant is the Schema for the tenants API.
type Tenant struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec TenantSpec `json:"spec,omitempty"`
Status TenantStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// TenantList contains a list of Tenant.
type TenantList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Tenant `json:"items"`
}
func init() {
SchemeBuilder.Register(&Tenant{}, &TenantList{})
}

View File

@@ -0,0 +1,21 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1alpha1
import (
"os"
ctrl "sigs.k8s.io/controller-runtime"
)
func (in *Tenant) SetupWebhookWithManager(mgr ctrl.Manager) error {
certData, _ := os.ReadFile("/tmp/k8s-webhook-server/serving-certs/tls.crt")
if len(certData) == 0 {
return nil
}
return ctrl.NewWebhookManagedBy(mgr).
For(in).
Complete()
}

View File

@@ -0,0 +1,308 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
"github.com/clastix/capsule/pkg/api"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/networking/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdditionalMetadata) DeepCopyInto(out *AdditionalMetadata) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalMetadata.
func (in *AdditionalMetadata) DeepCopy() *AdditionalMetadata {
if in == nil {
return nil
}
out := new(AdditionalMetadata)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CapsuleConfiguration) DeepCopyInto(out *CapsuleConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapsuleConfiguration.
func (in *CapsuleConfiguration) DeepCopy() *CapsuleConfiguration {
if in == nil {
return nil
}
out := new(CapsuleConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CapsuleConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CapsuleConfigurationList) DeepCopyInto(out *CapsuleConfigurationList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CapsuleConfiguration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapsuleConfigurationList.
func (in *CapsuleConfigurationList) DeepCopy() *CapsuleConfigurationList {
if in == nil {
return nil
}
out := new(CapsuleConfigurationList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CapsuleConfigurationList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CapsuleConfigurationSpec) DeepCopyInto(out *CapsuleConfigurationSpec) {
*out = *in
if in.UserGroups != nil {
in, out := &in.UserGroups, &out.UserGroups
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CapsuleConfigurationSpec.
func (in *CapsuleConfigurationSpec) DeepCopy() *CapsuleConfigurationSpec {
if in == nil {
return nil
}
out := new(CapsuleConfigurationSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OwnerSpec) DeepCopyInto(out *OwnerSpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerSpec.
func (in *OwnerSpec) DeepCopy() *OwnerSpec {
if in == nil {
return nil
}
out := new(OwnerSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Tenant) DeepCopyInto(out *Tenant) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tenant.
func (in *Tenant) DeepCopy() *Tenant {
if in == nil {
return nil
}
out := new(Tenant)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Tenant) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TenantList) DeepCopyInto(out *TenantList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Tenant, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantList.
func (in *TenantList) DeepCopy() *TenantList {
if in == nil {
return nil
}
out := new(TenantList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TenantList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
*out = *in
out.Owner = in.Owner
if in.NamespaceQuota != nil {
in, out := &in.NamespaceQuota, &out.NamespaceQuota
*out = new(int32)
**out = **in
}
if in.NamespacesMetadata != nil {
in, out := &in.NamespacesMetadata, &out.NamespacesMetadata
*out = new(AdditionalMetadata)
(*in).DeepCopyInto(*out)
}
if in.ServicesMetadata != nil {
in, out := &in.ServicesMetadata, &out.ServicesMetadata
*out = new(AdditionalMetadata)
(*in).DeepCopyInto(*out)
}
if in.StorageClasses != nil {
in, out := &in.StorageClasses, &out.StorageClasses
*out = new(api.AllowedListSpec)
(*in).DeepCopyInto(*out)
}
if in.IngressClasses != nil {
in, out := &in.IngressClasses, &out.IngressClasses
*out = new(api.AllowedListSpec)
(*in).DeepCopyInto(*out)
}
if in.IngressHostnames != nil {
in, out := &in.IngressHostnames, &out.IngressHostnames
*out = new(api.AllowedListSpec)
(*in).DeepCopyInto(*out)
}
if in.ContainerRegistries != nil {
in, out := &in.ContainerRegistries, &out.ContainerRegistries
*out = new(api.AllowedListSpec)
(*in).DeepCopyInto(*out)
}
if in.NodeSelector != nil {
in, out := &in.NodeSelector, &out.NodeSelector
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.NetworkPolicies != nil {
in, out := &in.NetworkPolicies, &out.NetworkPolicies
*out = make([]v1.NetworkPolicySpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.LimitRanges != nil {
in, out := &in.LimitRanges, &out.LimitRanges
*out = make([]corev1.LimitRangeSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ResourceQuota != nil {
in, out := &in.ResourceQuota, &out.ResourceQuota
*out = make([]corev1.ResourceQuotaSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AdditionalRoleBindings != nil {
in, out := &in.AdditionalRoleBindings, &out.AdditionalRoleBindings
*out = make([]api.AdditionalRoleBindingsSpec, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ExternalServiceIPs != nil {
in, out := &in.ExternalServiceIPs, &out.ExternalServiceIPs
*out = new(api.ExternalServiceIPsSpec)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantSpec.
func (in *TenantSpec) DeepCopy() *TenantSpec {
if in == nil {
return nil
}
out := new(TenantSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TenantStatus) DeepCopyInto(out *TenantStatus) {
*out = *in
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantStatus.
func (in *TenantStatus) DeepCopy() *TenantStatus {
if in == nil {
return nil
}
out := new(TenantStatus)
in.DeepCopyInto(out)
return out
}

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta1

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta1

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
// Package v1beta1 contains API Schema definitions for the capsule v1beta1 API group

View File

@@ -1,10 +1,10 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta1
import (
"github.com/projectcapsule/capsule/pkg/api"
"github.com/clastix/capsule/pkg/api"
)
type IngressOptions struct {

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta1
@@ -6,8 +6,7 @@ package v1beta1
import (
"strings"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/api/meta"
"github.com/clastix/capsule/pkg/api"
)
type NamespaceOptions struct {
@@ -19,11 +18,11 @@ type NamespaceOptions struct {
}
func (in *Tenant) hasForbiddenNamespaceLabelsAnnotations() bool {
if _, ok := in.Annotations[meta.ForbiddenNamespaceLabelsAnnotation]; ok {
if _, ok := in.Annotations[api.ForbiddenNamespaceLabelsAnnotation]; ok {
return true
}
if _, ok := in.Annotations[meta.ForbiddenNamespaceLabelsRegexpAnnotation]; ok {
if _, ok := in.Annotations[api.ForbiddenNamespaceLabelsRegexpAnnotation]; ok {
return true
}
@@ -31,11 +30,11 @@ func (in *Tenant) hasForbiddenNamespaceLabelsAnnotations() bool {
}
func (in *Tenant) hasForbiddenNamespaceAnnotationsAnnotations() bool {
if _, ok := in.Annotations[meta.ForbiddenNamespaceAnnotationsAnnotation]; ok {
if _, ok := in.Annotations[api.ForbiddenNamespaceAnnotationsAnnotation]; ok {
return true
}
if _, ok := in.Annotations[meta.ForbiddenNamespaceAnnotationsRegexpAnnotation]; ok {
if _, ok := in.Annotations[api.ForbiddenNamespaceAnnotationsRegexpAnnotation]; ok {
return true
}
@@ -48,8 +47,8 @@ func (in *Tenant) ForbiddenUserNamespaceLabels() *api.ForbiddenListSpec {
}
return &api.ForbiddenListSpec{
Exact: strings.Split(in.Annotations[meta.ForbiddenNamespaceLabelsAnnotation], ","),
Regex: in.Annotations[meta.ForbiddenNamespaceLabelsRegexpAnnotation],
Exact: strings.Split(in.Annotations[api.ForbiddenNamespaceLabelsAnnotation], ","),
Regex: in.Annotations[api.ForbiddenNamespaceLabelsRegexpAnnotation],
}
}
@@ -59,7 +58,7 @@ func (in *Tenant) ForbiddenUserNamespaceAnnotations() *api.ForbiddenListSpec {
}
return &api.ForbiddenListSpec{
Exact: strings.Split(in.Annotations[meta.ForbiddenNamespaceAnnotationsAnnotation], ","),
Regex: in.Annotations[meta.ForbiddenNamespaceAnnotationsRegexpAnnotation],
Exact: strings.Split(in.Annotations[api.ForbiddenNamespaceAnnotationsAnnotation], ","),
Regex: in.Annotations[api.ForbiddenNamespaceAnnotationsRegexpAnnotation],
}
}
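
A minimal usage sketch of the two helpers above, assuming the post-rename import paths shown on the new side of this diff (the annotation keys live in pkg/api/meta):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	capsulev1beta1 "github.com/projectcapsule/capsule/api/v1beta1"
	"github.com/projectcapsule/capsule/pkg/api/meta"
)

func main() {
	tnt := capsulev1beta1.Tenant{
		ObjectMeta: metav1.ObjectMeta{
			Name: "oil",
			Annotations: map[string]string{
				// Exact names are comma-separated; the regexp is kept verbatim.
				meta.ForbiddenNamespaceLabelsAnnotation:       "foo,bar",
				meta.ForbiddenNamespaceLabelsRegexpAnnotation: "^gas-",
			},
		},
	}

	spec := tnt.ForbiddenUserNamespaceLabels()
	fmt.Println(spec.Exact, spec.Regex) // [foo bar] ^gas-
}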

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta1

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta1
@@ -19,7 +19,7 @@ func (in OwnerListSpec) FindOwner(name string, kind OwnerKind) (owner OwnerSpec)
return in[i]
}
return owner
return
}
type ByKindAndName OwnerListSpec

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta1

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta1

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta1

View File

@@ -1,10 +1,10 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta1
import (
"github.com/projectcapsule/capsule/pkg/api"
"github.com/clastix/capsule/pkg/api"
)
type ServiceOptions struct {

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta1

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta1

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta1
@@ -6,7 +6,7 @@ package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/clastix/capsule/pkg/api"
)
// TenantSpec defines the desired state of Tenant.
@@ -20,21 +20,17 @@ type TenantSpec struct {
// Specifies the allowed StorageClasses assigned to the Tenant. Capsule assures that all PersistentVolumeClaim resources created in the Tenant can use only one of the allowed StorageClasses. Optional.
StorageClasses *api.AllowedListSpec `json:"storageClasses,omitempty"`
// Specifies options for the Ingress resources, such as allowed hostnames and IngressClass. Optional.
// +optional
IngressOptions IngressOptions `json:"ingressOptions,omitzero"`
IngressOptions IngressOptions `json:"ingressOptions,omitempty"`
// Specifies the trusted Image Registries assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed trusted registries. Optional.
ContainerRegistries *api.AllowedListSpec `json:"containerRegistries,omitempty"`
// Specifies the label to control the placement of pods on a given pool of worker nodes. All namespaces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
// Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
// +optional
NetworkPolicies api.NetworkPolicySpec `json:"networkPolicies,omitzero"`
NetworkPolicies api.NetworkPolicySpec `json:"networkPolicies,omitempty"`
// Specifies the resource min/max usage restrictions to the Tenant. The assigned values are inherited by any namespace created in the Tenant. Optional.
// +optional
LimitRanges api.LimitRangesSpec `json:"limitRanges,omitzero"`
LimitRanges api.LimitRangesSpec `json:"limitRanges,omitempty"`
// Specifies a list of ResourceQuota resources assigned to the Tenant. The assigned values are inherited by any namespace created in the Tenant. The Capsule operator aggregates ResourceQuota at Tenant level, so that the hard quota is never crossed for the given Tenant. This permits the Tenant owner to consume resources in the Tenant regardless of the namespace. Optional.
// +optional
ResourceQuota api.ResourceQuotaSpec `json:"resourceQuotas,omitzero"`
ResourceQuota api.ResourceQuotaSpec `json:"resourceQuotas,omitempty"`
// Specifies additional RoleBindings assigned to the Tenant. Capsule will ensure that all namespaces in the Tenant always contain the RoleBinding for the given ClusterRole. Optional.
AdditionalRoleBindings []api.AdditionalRoleBindingsSpec `json:"additionalRoleBindings,omitempty"`
// Specify the allowed values for the imagePullPolicies option in Pod resources. Capsule assures that all Pod resources created in the Tenant can use only one of the allowed policy. Optional.
@@ -54,13 +50,11 @@ type TenantSpec struct {
// Tenant is the Schema for the tenants API.
type Tenant struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ObjectMeta `json:"metadata,omitzero"`
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec TenantSpec `json:"spec"`
// +optional
Status TenantStatus `json:"status,omitzero"`
Spec TenantSpec `json:"spec,omitempty"`
Status TenantStatus `json:"status,omitempty"`
}
func (in *Tenant) Hub() {}
@@ -70,10 +64,8 @@ func (in *Tenant) Hub() {}
// TenantList contains a list of Tenant.
type TenantList struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ListMeta `json:"metadata,omitzero"`
Items []Tenant `json:"items"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Tenant `json:"items"`
}
func init() {
@@ -83,7 +75,9 @@ func init() {
func (in *Tenant) GetNamespaces() (res []string) {
res = make([]string, 0, len(in.Status.Namespaces))
res = append(res, in.Status.Namespaces...)
for _, ns := range in.Status.Namespaces {
res = append(res, ns)
}
return res
return
}

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta1
@@ -15,6 +15,7 @@ func (in *Tenant) SetupWebhookWithManager(mgr ctrl.Manager) error {
return nil
}
return ctrl.NewWebhookManagedBy(mgr, in).
return ctrl.NewWebhookManagedBy(mgr).
For(in).
Complete()
}

View File

@@ -1,6 +1,7 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Copyright 2020-2023 Project Capsule Authors.
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
// Code generated by controller-gen. DO NOT EDIT.
@@ -8,7 +9,7 @@
package v1beta1
import (
"github.com/projectcapsule/capsule/pkg/api"
"github.com/clastix/capsule/pkg/api"
runtime "k8s.io/apimachinery/pkg/runtime"
)

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta2

View File

@@ -0,0 +1,142 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
"fmt"
"strconv"
"strings"
"sigs.k8s.io/controller-runtime/pkg/conversion"
capsulev1alpha1 "github.com/clastix/capsule/api/v1alpha1"
)
func (in *CapsuleConfiguration) ConvertTo(raw conversion.Hub) error {
dst, ok := raw.(*capsulev1alpha1.CapsuleConfiguration)
if !ok {
return fmt.Errorf("expected type *capsulev1alpha1.CapsuleConfiguration, got %T", raw)
}
dst.ObjectMeta = in.ObjectMeta
dst.Spec.ProtectedNamespaceRegexpString = in.Spec.ProtectedNamespaceRegexpString
dst.Spec.UserGroups = in.Spec.UserGroups
annotations := dst.GetAnnotations()
if annotations == nil {
annotations = make(map[string]string)
}
if in.Spec.NodeMetadata != nil {
if len(in.Spec.NodeMetadata.ForbiddenLabels.Exact) > 0 {
annotations[capsulev1alpha1.ForbiddenNodeLabelsAnnotation] = strings.Join(in.Spec.NodeMetadata.ForbiddenLabels.Exact, ",")
}
if len(in.Spec.NodeMetadata.ForbiddenLabels.Regex) > 0 {
annotations[capsulev1alpha1.ForbiddenNodeLabelsRegexpAnnotation] = in.Spec.NodeMetadata.ForbiddenLabels.Regex
}
if len(in.Spec.NodeMetadata.ForbiddenAnnotations.Exact) > 0 {
annotations[capsulev1alpha1.ForbiddenNodeAnnotationsAnnotation] = strings.Join(in.Spec.NodeMetadata.ForbiddenAnnotations.Exact, ",")
}
if len(in.Spec.NodeMetadata.ForbiddenAnnotations.Regex) > 0 {
annotations[capsulev1alpha1.ForbiddenNodeAnnotationsRegexpAnnotation] = in.Spec.NodeMetadata.ForbiddenAnnotations.Regex
}
}
annotations[capsulev1alpha1.EnableTLSConfigurationAnnotationName] = fmt.Sprintf("%t", in.Spec.EnableTLSReconciler)
annotations[capsulev1alpha1.TLSSecretNameAnnotation] = in.Spec.CapsuleResources.TLSSecretName
annotations[capsulev1alpha1.MutatingWebhookConfigurationName] = in.Spec.CapsuleResources.MutatingWebhookConfigurationName
annotations[capsulev1alpha1.ValidatingWebhookConfigurationName] = in.Spec.CapsuleResources.ValidatingWebhookConfigurationName
dst.SetAnnotations(annotations)
return nil
}
func (in *CapsuleConfiguration) ConvertFrom(raw conversion.Hub) error {
src, ok := raw.(*capsulev1alpha1.CapsuleConfiguration)
if !ok {
return fmt.Errorf("expected type *capsulev1alpha1.CapsuleConfiguration, got %T", raw)
}
in.ObjectMeta = src.ObjectMeta
in.Spec.ProtectedNamespaceRegexpString = src.Spec.ProtectedNamespaceRegexpString
in.Spec.UserGroups = src.Spec.UserGroups
annotations := src.GetAnnotations()
if value, found := annotations[capsulev1alpha1.ForbiddenNodeLabelsAnnotation]; found {
if in.Spec.NodeMetadata == nil {
in.Spec.NodeMetadata = &NodeMetadata{}
}
in.Spec.NodeMetadata.ForbiddenLabels.Exact = strings.Split(value, ",")
delete(annotations, capsulev1alpha1.ForbiddenNodeLabelsAnnotation)
}
if value, found := annotations[capsulev1alpha1.ForbiddenNodeLabelsRegexpAnnotation]; found {
if in.Spec.NodeMetadata == nil {
in.Spec.NodeMetadata = &NodeMetadata{}
}
in.Spec.NodeMetadata.ForbiddenLabels.Regex = value
delete(annotations, capsulev1alpha1.ForbiddenNodeLabelsRegexpAnnotation)
}
if value, found := annotations[capsulev1alpha1.ForbiddenNodeAnnotationsAnnotation]; found {
if in.Spec.NodeMetadata == nil {
in.Spec.NodeMetadata = &NodeMetadata{}
}
in.Spec.NodeMetadata.ForbiddenAnnotations.Exact = strings.Split(value, ",")
delete(annotations, capsulev1alpha1.ForbiddenNodeAnnotationsAnnotation)
}
if value, found := annotations[capsulev1alpha1.ForbiddenNodeAnnotationsRegexpAnnotation]; found {
if in.Spec.NodeMetadata == nil {
in.Spec.NodeMetadata = &NodeMetadata{}
}
in.Spec.NodeMetadata.ForbiddenAnnotations.Regex = value
delete(annotations, capsulev1alpha1.ForbiddenNodeAnnotationsRegexpAnnotation)
}
if value, found := annotations[capsulev1alpha1.EnableTLSConfigurationAnnotationName]; found {
v, _ := strconv.ParseBool(value)
in.Spec.EnableTLSReconciler = v
delete(annotations, capsulev1alpha1.EnableTLSConfigurationAnnotationName)
}
if value, found := annotations[capsulev1alpha1.TLSSecretNameAnnotation]; found {
in.Spec.CapsuleResources.TLSSecretName = value
delete(annotations, capsulev1alpha1.TLSSecretNameAnnotation)
}
if value, found := annotations[capsulev1alpha1.MutatingWebhookConfigurationName]; found {
in.Spec.CapsuleResources.MutatingWebhookConfigurationName = value
delete(annotations, capsulev1alpha1.MutatingWebhookConfigurationName)
}
if value, found := annotations[capsulev1alpha1.ValidatingWebhookConfigurationName]; found {
in.Spec.CapsuleResources.ValidatingWebhookConfigurationName = value
delete(annotations, capsulev1alpha1.ValidatingWebhookConfigurationName)
}
in.SetAnnotations(annotations)
return nil
}
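
A round-trip sketch of the two conversion functions above, assuming both packages are importable from the module path used in this file and that the v1alpha1 type acts as the conversion hub:

package main

import (
	"fmt"

	capsulev1alpha1 "github.com/clastix/capsule/api/v1alpha1"
	capsulev1beta2 "github.com/clastix/capsule/api/v1beta2"
)

func main() {
	src := capsulev1beta2.CapsuleConfiguration{}
	src.Spec.EnableTLSReconciler = true
	src.Spec.CapsuleResources.TLSSecretName = "capsule-tls"

	// ConvertTo flattens the structured v1beta2 fields into v1alpha1 annotations.
	dst := capsulev1alpha1.CapsuleConfiguration{}
	if err := src.ConvertTo(&dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.GetAnnotations()[capsulev1alpha1.EnableTLSConfigurationAnnotationName]) // true

	// ConvertFrom consumes the annotations back into the structured spec.
	out := capsulev1beta2.CapsuleConfiguration{}
	if err := out.ConvertFrom(&dst); err != nil {
		panic(err)
	}
	fmt.Println(out.Spec.EnableTLSReconciler) // true
}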

View File

@@ -1,19 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/projectcapsule/capsule/pkg/api"
)
// CapsuleConfigurationStatus defines the Capsule configuration status.
type CapsuleConfigurationStatus struct {
// Last time all caches were invalidated
LastCacheInvalidation metav1.Time `json:"lastCacheInvalidation,omitempty"`
// Users which are considered Capsule Users and are bound to the Capsule Tenant construct.
Users api.UserListSpec `json:"users,omitempty"`
}

View File

@@ -1,37 +1,19 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/api/meta"
"github.com/clastix/capsule/pkg/api"
)
// CapsuleConfigurationSpec defines the Capsule configuration.
type CapsuleConfigurationSpec struct {
// Define entities which are considered part of the Capsule construct
// Users not mentioned here will be ignored by Capsule
Users api.UserListSpec `json:"users,omitempty"`
// Deprecated: use users property instead (https://projectcapsule.dev/docs/operating/setup/configuration/#users)
//
// Names of the users considered as Capsule users.
UserNames []string `json:"userNames,omitempty"`
// Deprecated: use users property instead (https://projectcapsule.dev/docs/operating/setup/configuration/#users)
//
// Names of the groups considered as Capsule users.
// Names of the groups for Capsule users.
// +kubebuilder:default={capsule.clastix.io}
UserGroups []string `json:"userGroups,omitempty"`
// Define groups which, when found in the request of a user, will be ignored by Capsule.
// This might be useful if you have one group that all users are in, but you want to separate administrators from normal users with additional groups.
IgnoreUserWithGroups []string `json:"ignoreUserWithGroups,omitempty"`
// ServiceAccounts within tenant namespaces can be promoted to owners of the given tenant.
// This is achieved by labeling the ServiceAccount, after which it is considered an owner; only existing owners of the tenant can do this.
// However, ServiceAccounts which have been promoted to owner cannot promote further ServiceAccounts.
// +kubebuilder:default=false
AllowServiceAccountPromotion bool `json:"allowServiceAccountPromotion,omitempty"`
// Enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix,
// separated by a dash. This is useful to avoid Namespace name collision in a public CaaS environment.
// +kubebuilder:default=false
@@ -41,73 +23,21 @@ type CapsuleConfigurationSpec struct {
// Allows setting a different name rather than the canonical one for the Capsule configuration objects,
// such as webhook secret or configurations.
// +kubebuilder:default={TLSSecretName:"capsule-tls",mutatingWebhookConfigurationName:"capsule-mutating-webhook-configuration",validatingWebhookConfigurationName:"capsule-validating-webhook-configuration"}
// +optional
CapsuleResources CapsuleResources `json:"overrides,omitzero"`
CapsuleResources CapsuleResources `json:"overrides,omitempty"`
// Allows setting the forbidden metadata for the worker nodes that could be patched by a Tenant.
// This applies only if the Tenant has an active NodeSelector, and the Owner has the right to patch their nodes.
NodeMetadata *NodeMetadata `json:"nodeMetadata,omitempty"`
// Toggles the TLS reconciler, the controller that is able to generate CA and certificates for the webhooks
// when not using an already provided CA and certificate, or when these are managed externally with Vault, or cert-manager.
// +kubebuilder:default=false
// +kubebuilder:default=true
EnableTLSReconciler bool `json:"enableTLSReconciler"` //nolint:tagliatelle
// Define entities which can act as Administrators in the Capsule construct.
// These entities are automatically owners of all existing tenants, meaning they can add namespaces to any tenant. However, they must be explicit by using the Capsule label
// when interacting with namespaces, because if that label is not defined, it's assumed the namespace interaction was not targeted towards a tenant and will therefore
// be ignored by Capsule.
Administrators api.UserListSpec `json:"administrators,omitempty"`
// Configuration for dynamic Validating and Mutating Admission webhooks managed by Capsule.
Admission DynamicAdmission `json:"admission,omitempty"`
// Define Properties for managed ClusterRoles by Capsule
// +kubebuilder:default={}
RBAC *RBACConfiguration `json:"rbac"`
// Define the period of time after which a cache invalidation is executed for all caches.
// +kubebuilder:default="24h"
CacheInvalidation metav1.Duration `json:"cacheInvalidation"`
}
type RBACConfiguration struct {
// The ClusterRoles applied for Administrators
// +kubebuilder:default={capsule-namespace-deleter}
AdministrationClusterRoles []string `json:"administrationClusterRoles,omitempty"`
// The ClusterRoles applied for ServiceAccounts which had owner Promotion
// +kubebuilder:default={capsule-namespace-provisioner,capsule-namespace-deleter}
PromotionClusterRoles []string `json:"promotionClusterRoles,omitempty"`
// Name for the ClusterRole required to grant Namespace Deletion permissions.
// +kubebuilder:default=capsule-namespace-deleter
DeleterClusterRole string `json:"deleter,omitempty"`
// Name for the ClusterRole required to grant Namespace Provision permissions.
// +kubebuilder:default=capsule-namespace-provisioner
ProvisionerClusterRole string `json:"provisioner,omitempty"`
}
type DynamicAdmission struct {
// Configure dynamic Mutating Admission for Capsule
Mutating DynamicAdmissionConfig `json:"mutating,omitempty"`
// Configure dynamic Validating Admission for Capsule
Validating DynamicAdmissionConfig `json:"validating,omitempty"`
}
type DynamicAdmissionConfig struct {
// Name of the Admission Webhook
Name meta.RFC1123Name `json:"name,omitempty"`
// Labels added to the Admission Webhook
// +optional
Labels map[string]string `json:"labels,omitempty"`
// Annotations added to the Admission Webhook
// +optional
Annotations map[string]string `json:"annotations,omitempty"`
// From the upstream struct
Client admissionregistrationv1.WebhookClientConfig `json:"client"`
}
type NodeMetadata struct {
// Define the labels that a Tenant Owner cannot set for their nodes.
// +optional
ForbiddenLabels api.ForbiddenListSpec `json:"forbiddenLabels,omitzero"`
ForbiddenLabels api.ForbiddenListSpec `json:"forbiddenLabels"`
// Define the annotations that a Tenant Owner cannot set for their nodes.
// +optional
ForbiddenAnnotations api.ForbiddenListSpec `json:"forbiddenAnnotations,omitzero"`
ForbiddenAnnotations api.ForbiddenListSpec `json:"forbiddenAnnotations"`
}
type CapsuleResources struct {
@@ -124,21 +54,15 @@ type CapsuleResources struct {
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Cluster
// +kubebuilder:storageversion
// CapsuleConfiguration is the Schema for the Capsule configuration API.
type CapsuleConfiguration struct {
metav1.TypeMeta `json:",inline"`
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// +optional
metav1.ObjectMeta `json:"metadata,omitzero"`
Spec CapsuleConfigurationSpec `json:"spec"`
// +optional
Status CapsuleConfigurationStatus `json:"status,omitzero"`
Spec CapsuleConfigurationSpec `json:"spec,omitempty"`
}
// +kubebuilder:object:root=true
@@ -146,9 +70,8 @@ type CapsuleConfiguration struct {
// CapsuleConfigurationList contains a list of CapsuleConfiguration.
type CapsuleConfigurationList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitzero"`
Items []CapsuleConfiguration `json:"items"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []CapsuleConfiguration `json:"items"`
}
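
For orientation, a compile-oriented sketch of a spec built from the fields above (hypothetically placed alongside this package; the Exact/Regex shape of api.ForbiddenListSpec matches its use in the v1beta1 helpers earlier in this diff):

func exampleConfiguration() CapsuleConfigurationSpec {
	return CapsuleConfigurationSpec{
		UserGroups:          []string{"capsule.clastix.io"},
		EnableTLSReconciler: true,
		NodeMetadata: &NodeMetadata{
			// Tenant owners with node-patch rights still cannot touch these labels.
			ForbiddenLabels: api.ForbiddenListSpec{
				Exact: []string{"node-role.kubernetes.io/control-plane"},
				Regex: `^topology\.kubernetes\.io/.*`,
			},
		},
	}
}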
func init() {

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta2

View File

@@ -1,12 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
"github.com/projectcapsule/capsule/pkg/api"
)
type GatewayOptions struct {
AllowedClasses *api.DefaultAllowedListSpec `json:"allowedClasses,omitempty"`
}

View File

@@ -1,4 +1,4 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
// Package v1beta2 contains API Schema definitions for the capsule v1beta2 API group

View File

@@ -1,10 +1,10 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
"github.com/projectcapsule/capsule/pkg/api"
"github.com/clastix/capsule/pkg/api"
)
type IngressOptions struct {

View File

@@ -1,42 +1,20 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
"github.com/projectcapsule/capsule/pkg/api"
"github.com/clastix/capsule/pkg/api"
)
type NamespaceOptions struct {
// +kubebuilder:validation:Minimum=1
// Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
Quota *int32 `json:"quota,omitempty"`
// Deprecated: Use additionalMetadataList instead (https://projectcapsule.dev/docs/tenants/metadata/#additionalmetadatalist)
//
// Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
AdditionalMetadata *api.AdditionalMetadataSpec `json:"additionalMetadata,omitempty"`
// Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant via a list. Optional.
AdditionalMetadataList []api.AdditionalMetadataSelectorSpec `json:"additionalMetadataList,omitempty"`
// Required metadata for namespaces within this tenant
// +optional
RequiredMetadata *RequiredMetadata `json:"requiredMetadata,omitzero"`
// Define the labels that a Tenant Owner cannot set for their Namespace resources.
// +optional
ForbiddenLabels api.ForbiddenListSpec `json:"forbiddenLabels,omitzero"`
ForbiddenLabels api.ForbiddenListSpec `json:"forbiddenLabels,omitempty"`
// Define the annotations that a Tenant Owner cannot set for their Namespace resources.
// +optional
ForbiddenAnnotations api.ForbiddenListSpec `json:"forbiddenAnnotations,omitzero"`
// If enabled only metadata from additionalMetadata is reconciled to the namespaces.
//+kubebuilder:default:=false
ManagedMetadataOnly bool `json:"managedMetadataOnly,omitempty"`
}
type RequiredMetadata struct {
// Labels that must be defined for each namespace
// +optional
Labels map[string]string `json:"labels,omitzero"`
// Annotations that must be defined for each namespace
// +optional
Annotations map[string]string `json:"annotations,omitzero"`
ForbiddenAnnotations api.ForbiddenListSpec `json:"forbiddenAnnotations,omitempty"`
}

View File

@@ -1,33 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/projectcapsule/capsule/pkg/api"
)
// +kubebuilder:object:generate=true
type NamespaceRule struct {
// Enforce these properties via Rules
NamespaceRuleBody `json:",inline"`
// Select namespaces which this rule is applied to
NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
}
// +kubebuilder:object:generate=true
type NamespaceRuleBody struct {
// Enforcement Rules applied
//+optional
Enforce NamespaceRuleEnforceBody `json:"enforce,omitzero"`
}
// +kubebuilder:object:generate=true
type NamespaceRuleEnforceBody struct {
// Define registries which are allowed to be used within this tenant
// The rules are aggregated; you can use Regular Expressions to match registry endpoints
Registries []api.OCIRegistry `json:"registries,omitempty"`
}

api/v1beta2/owner.go Normal file
View File

@@ -0,0 +1,57 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta2
type OwnerSpec struct {
// Kind of tenant owner. Possible values are "User", "Group", and "ServiceAccount"
Kind OwnerKind `json:"kind"`
// Name of tenant owner.
Name string `json:"name"`
// Defines additional cluster-roles for the specific Owner.
// +kubebuilder:default={admin,capsule-namespace-deleter}
ClusterRoles []string `json:"clusterRoles,omitempty"`
// Proxy settings for tenant owner.
ProxyOperations []ProxySettings `json:"proxySettings,omitempty"`
}
// +kubebuilder:validation:Enum=User;Group;ServiceAccount
type OwnerKind string
func (k OwnerKind) String() string {
return string(k)
}
type ProxySettings struct {
Kind ProxyServiceKind `json:"kind"`
Operations []ProxyOperation `json:"operations"`
}
// +kubebuilder:validation:Enum=List;Update;Delete
type ProxyOperation string
func (p ProxyOperation) String() string {
return string(p)
}
// +kubebuilder:validation:Enum=Nodes;StorageClasses;IngressClasses;PriorityClasses
type ProxyServiceKind string
func (p ProxyServiceKind) String() string {
return string(p)
}
const (
NodesProxy ProxyServiceKind = "Nodes"
StorageClassesProxy ProxyServiceKind = "StorageClasses"
IngressClassesProxy ProxyServiceKind = "IngressClasses"
PriorityClassesProxy ProxyServiceKind = "PriorityClasses"
ListOperation ProxyOperation = "List"
UpdateOperation ProxyOperation = "Update"
DeleteOperation ProxyOperation = "Delete"
UserOwner OwnerKind = "User"
GroupOwner OwnerKind = "Group"
ServiceAccountOwner OwnerKind = "ServiceAccount"
)

View File

@@ -1,43 +1,14 @@
// Copyright 2020-2026 Project Capsule Authors
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package api
package v1beta2
import (
"slices"
"sort"
)
// +kubebuilder:object:generate=true
type OwnerListSpec []OwnerSpec
func (o OwnerListSpec) IsOwner(name string, groups []string) bool {
for _, owner := range o {
switch owner.Kind {
case UserOwner, ServiceAccountOwner:
if name == owner.Name {
return true
}
case GroupOwner:
if slices.Contains(groups, owner.Name) {
return true
}
}
}
return false
}
func (o OwnerListSpec) ToStatusOwners() OwnerStatusListSpec {
list := OwnerStatusListSpec{}
for _, owner := range o {
list = append(list, owner.CoreOwnerSpec)
}
return list
}
func (o OwnerListSpec) FindOwner(name string, kind OwnerKind) (owner OwnerSpec) {
sort.Sort(ByKindAndName(o))
i := sort.Search(len(o), func(i int) bool {
@@ -48,7 +19,7 @@ func (o OwnerListSpec) FindOwner(name string, kind OwnerKind) (owner OwnerSpec)
return o[i]
}
return owner
return
}
type ByKindAndName OwnerListSpec

View File

@@ -0,0 +1,86 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestOwnerListSpec_FindOwner(t *testing.T) {
bla := OwnerSpec{
Kind: UserOwner,
Name: "bla",
ProxyOperations: []ProxySettings{
{
Kind: IngressClassesProxy,
Operations: []ProxyOperation{"Delete"},
},
},
}
bar := OwnerSpec{
Kind: GroupOwner,
Name: "bar",
ProxyOperations: []ProxySettings{
{
Kind: StorageClassesProxy,
Operations: []ProxyOperation{"Delete"},
},
},
}
baz := OwnerSpec{
Kind: UserOwner,
Name: "baz",
ProxyOperations: []ProxySettings{
{
Kind: StorageClassesProxy,
Operations: []ProxyOperation{"Update"},
},
},
}
fim := OwnerSpec{
Kind: ServiceAccountOwner,
Name: "fim",
ProxyOperations: []ProxySettings{
{
Kind: NodesProxy,
Operations: []ProxyOperation{"List"},
},
},
}
bom := OwnerSpec{
Kind: GroupOwner,
Name: "bom",
ProxyOperations: []ProxySettings{
{
Kind: StorageClassesProxy,
Operations: []ProxyOperation{"Delete"},
},
{
Kind: NodesProxy,
Operations: []ProxyOperation{"Delete"},
},
},
}
qip := OwnerSpec{
Kind: ServiceAccountOwner,
Name: "qip",
ProxyOperations: []ProxySettings{
{
Kind: StorageClassesProxy,
Operations: []ProxyOperation{"List", "Delete"},
},
},
}
owners := OwnerListSpec{bom, qip, bla, bar, baz, fim}
assert.Equal(t, owners.FindOwner("bom", GroupOwner), bom)
assert.Equal(t, owners.FindOwner("qip", ServiceAccountOwner), qip)
assert.Equal(t, owners.FindOwner("bla", UserOwner), bla)
assert.Equal(t, owners.FindOwner("bar", GroupOwner), bar)
assert.Equal(t, owners.FindOwner("baz", UserOwner), baz)
assert.Equal(t, owners.FindOwner("fim", ServiceAccountOwner), fim)
assert.Equal(t, owners.FindOwner("notfound", ServiceAccountOwner), OwnerSpec{})
}

View File

@@ -1,282 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
"errors"
"fmt"
"sort"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"github.com/projectcapsule/capsule/pkg/api/meta"
)
func (r *ResourcePool) GetQuotaName() string {
return fmt.Sprintf("capsule-pool-%s", r.GetName())
}
func (r *ResourcePool) AssignNamespaces(namespaces []corev1.Namespace) {
var l []string
for _, ns := range namespaces {
if ns.Status.Phase == corev1.NamespaceActive && ns.DeletionTimestamp == nil {
l = append(l, ns.GetName())
}
}
sort.Strings(l)
r.Status.NamespaceSize = uint(len(l))
r.Status.Namespaces = l
}
func (r *ResourcePool) AssignClaims() {
var size uint
for _, claims := range r.Status.Claims {
for range claims {
size++
}
}
r.Status.ClaimSize = size
}
func (r *ResourcePool) GetClaimFromStatus(cl *ResourcePoolClaim) *ResourcePoolClaimsItem {
ns := cl.Namespace
claims := r.Status.Claims[ns]
if claims == nil {
return nil
}
for _, claim := range claims {
if claim.UID == cl.UID {
return claim
}
}
return nil
}
func (r *ResourcePool) AddClaimToStatus(claim *ResourcePoolClaim) {
ns := claim.Namespace
if r.Status.Claims == nil {
r.Status.Claims = ResourcePoolNamespaceClaimsStatus{}
}
if r.Status.Allocation.Claimed == nil {
r.Status.Allocation.Claimed = corev1.ResourceList{}
}
claims := r.Status.Claims[ns]
if claims == nil {
claims = ResourcePoolClaimsList{}
}
scl := &ResourcePoolClaimsItem{
NamespacedRFC1123ObjectReferenceWithNamespaceWithUID: meta.NamespacedRFC1123ObjectReferenceWithNamespaceWithUID{
UID: claim.UID,
Name: meta.RFC1123Name(claim.Name),
Namespace: meta.RFC1123SubdomainName(claim.Namespace),
},
Claims: claim.Spec.ResourceClaims,
}
// Try to update existing entry if UID matches
exists := false
for i, cl := range claims {
if cl.UID == claim.UID {
claims[i] = scl
exists = true
break
}
}
if !exists {
claims = append(claims, scl)
}
r.Status.Claims[ns] = claims
r.CalculateClaimedResources()
}
func (r *ResourcePool) RemoveClaimFromStatus(claim *ResourcePoolClaim) {
newClaims := ResourcePoolClaimsList{}
claims, ok := r.Status.Claims[claim.Namespace]
if !ok {
return
}
for _, cl := range claims {
if cl.UID != claim.UID {
newClaims = append(newClaims, cl)
}
}
r.Status.Claims[claim.Namespace] = newClaims
if len(newClaims) == 0 {
delete(r.Status.Claims, claim.Namespace)
}
}
func (r *ResourcePool) CalculateClaimedResources() {
usage := corev1.ResourceList{}
for res := range r.Status.Allocation.Hard {
usage[res] = resource.MustParse("0")
}
for _, claims := range r.Status.Claims {
for _, claim := range claims {
for resourceName, qt := range claim.Claims {
amount, exists := usage[resourceName]
if !exists {
amount = resource.MustParse("0")
}
amount.Add(qt)
usage[resourceName] = amount
}
}
}
r.Status.Allocation.Claimed = usage
r.CalculateAvailableResources()
}
func (r *ResourcePool) CalculateAvailableResources() {
available := corev1.ResourceList{}
for res, qt := range r.Status.Allocation.Hard {
amount, exists := r.Status.Allocation.Claimed[res]
if exists {
qt.Sub(amount)
}
available[res] = qt
}
r.Status.Allocation.Available = available
}
func (r *ResourcePool) CanClaimFromPool(claim corev1.ResourceList) []error {
claimable := r.GetAvailableClaimableResources()
errs := []error{}
for resourceName, req := range claim {
available, exists := claimable[resourceName]
if !exists || available.IsZero() || available.Cmp(req) < 0 {
errs = append(errs, errors.New("not enough resources "+string(resourceName)+" available"))
}
}
return errs
}
func (r *ResourcePool) GetAvailableClaimableResources() corev1.ResourceList {
hard := r.Status.Allocation.Hard.DeepCopy()
for resourceName, qt := range hard {
claimed, exists := r.Status.Allocation.Claimed[resourceName]
if !exists {
claimed = resource.MustParse("0")
}
qt.Sub(claimed)
hard[resourceName] = qt
}
return hard
}
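
Concretely, the available figure is hard minus claimed. A worked sketch (types from this file; resource from k8s.io/apimachinery/pkg/api/resource and fmt assumed imported):

pool := &ResourcePool{
	Status: ResourcePoolStatus{
		Allocation: ResourcePoolQuotaStatus{
			Hard:    corev1.ResourceList{corev1.ResourceLimitsCPU: resource.MustParse("4")},
			Claimed: corev1.ResourceList{corev1.ResourceLimitsCPU: resource.MustParse("1500m")},
		},
	},
}
free := pool.GetAvailableClaimableResources()
cpu := free[corev1.ResourceLimitsCPU]
fmt.Println(cpu.String()) // 2500m, i.e. hard (4) minus claimed (1.5)

// A claim for 3 CPUs exceeds the 2500m still claimable, so one error is returned.
errs := pool.CanClaimFromPool(corev1.ResourceList{corev1.ResourceLimitsCPU: resource.MustParse("3")})
fmt.Println(len(errs)) // 1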
// Gets the Hard specification for the ResourceQuotas.
// This takes into account the default resources being used; however, they don't count towards the claim usage.
// This may change in the future: the defaults are not calculated as usage because doing so might interrupt namespace management,
// as we would need to verify whether a new namespace with its defaults still has room in the Pool. The same applies when attempting to join existing namespaces.
func (r *ResourcePool) GetResourceQuotaHardResources(namespace string) corev1.ResourceList {
_, claimed := r.GetNamespaceClaims(namespace)
for resourceName, amount := range claimed {
if amount.IsZero() {
delete(claimed, resourceName)
}
}
// Only consider the Defaults when enabled
for resourceName, amount := range r.Spec.Defaults {
usedValue := claimed[resourceName]
usedValue.Add(amount)
claimed[resourceName] = usedValue
}
return claimed
}
// Gets the total amount of claimed resources for a namespace.
func (r *ResourcePool) GetNamespaceClaims(namespace string) (claims map[string]*ResourcePoolClaimsItem, claimedResources corev1.ResourceList) {
claimedResources = corev1.ResourceList{}
claims = map[string]*ResourcePoolClaimsItem{}
// First, check if quota exists in the status
for ns, cl := range r.Status.Claims {
if ns != namespace {
continue
}
for _, claim := range cl {
for resourceName, claimed := range claim.Claims {
usedValue, usedExists := claimedResources[resourceName]
if !usedExists {
usedValue = resource.MustParse("0") // Default to zero if no used value is found
}
// Combine with claim
usedValue.Add(claimed)
claimedResources[resourceName] = usedValue
}
claims[string(claim.UID)] = claim
}
}
return claims, claimedResources
}
// Calculate usage for each namespace.
func (r *ResourcePool) GetClaimedByNamespaceClaims() (claims map[string]corev1.ResourceList) {
claims = map[string]corev1.ResourceList{}
// First, check if quota exists in the status
for ns, cl := range r.Status.Claims {
claims[ns] = corev1.ResourceList{}
nsScope := claims[ns]
for _, claim := range cl {
for resourceName, claimed := range claim.Claims {
usedValue, usedExists := nsScope[resourceName]
if !usedExists {
usedValue = resource.MustParse("0")
}
usedValue.Add(claimed)
nsScope[resourceName] = usedValue
}
}
}
return claims
}

View File

@@ -1,318 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
"testing"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/projectcapsule/capsule/pkg/api/meta"
"github.com/stretchr/testify/assert"
)
func TestGetClaimFromStatus(t *testing.T) {
ns := "test-namespace"
testUID := types.UID("test-uid")
otherUID := types.UID("wrong-uid")
claim := &ResourcePoolClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claim-a",
Namespace: ns,
UID: testUID,
},
}
pool := &ResourcePool{
Status: ResourcePoolStatus{
Claims: ResourcePoolNamespaceClaimsStatus{
ns: {
&ResourcePoolClaimsItem{
NamespacedRFC1123ObjectReferenceWithNamespaceWithUID: meta.NamespacedRFC1123ObjectReferenceWithNamespaceWithUID{
UID: testUID,
},
Claims: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("500m"),
corev1.ResourceMemory: resource.MustParse("256Mi"),
},
},
},
},
},
}
t.Run("returns matching claim", func(t *testing.T) {
found := pool.GetClaimFromStatus(claim)
assert.NotNil(t, found)
assert.Equal(t, testUID, found.UID)
})
t.Run("returns nil if UID doesn't match", func(t *testing.T) {
claimWrongUID := *claim
claimWrongUID.UID = otherUID
found := pool.GetClaimFromStatus(&claimWrongUID)
assert.Nil(t, found)
})
t.Run("returns nil if namespace has no claims", func(t *testing.T) {
claimWrongNS := *claim
claimWrongNS.Namespace = "other-ns"
found := pool.GetClaimFromStatus(&claimWrongNS)
assert.Nil(t, found)
})
}
func makeResourceList(cpu, memory string) corev1.ResourceList {
return corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse(cpu),
corev1.ResourceLimitsMemory: resource.MustParse(memory),
}
}
func makeClaim(name, ns string, uid types.UID, res corev1.ResourceList) *ResourcePoolClaim {
return &ResourcePoolClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
UID: uid,
},
Spec: ResourcePoolClaimSpec{
ResourceClaims: res,
},
}
}
func TestAssignNamespaces(t *testing.T) {
pool := &ResourcePool{}
namespaces := []corev1.Namespace{
{ObjectMeta: metav1.ObjectMeta{Name: "active-ns"}, Status: corev1.NamespaceStatus{Phase: corev1.NamespaceActive}},
{ObjectMeta: metav1.ObjectMeta{Name: "terminating-ns", DeletionTimestamp: &metav1.Time{}}, Status: corev1.NamespaceStatus{Phase: corev1.NamespaceTerminating}},
}
pool.AssignNamespaces(namespaces)
assert.Equal(t, uint(1), pool.Status.NamespaceSize)
assert.Equal(t, []string{"active-ns"}, pool.Status.Namespaces)
}
func TestAssignClaims(t *testing.T) {
pool := &ResourcePool{
Status: ResourcePoolStatus{
Claims: ResourcePoolNamespaceClaimsStatus{
"ns": {
&ResourcePoolClaimsItem{},
&ResourcePoolClaimsItem{},
},
},
},
}
pool.AssignClaims()
assert.Equal(t, uint(2), pool.Status.ClaimSize)
}
func TestAddRemoveClaimToStatus(t *testing.T) {
pool := &ResourcePool{}
claim := makeClaim("claim-1", "ns", "uid-1", makeResourceList("1", "1Gi"))
pool.AddClaimToStatus(claim)
stored := pool.GetClaimFromStatus(claim)
assert.NotNil(t, stored)
assert.Equal(t, meta.RFC1123Name("claim-1"), stored.Name)
pool.RemoveClaimFromStatus(claim)
assert.Nil(t, pool.GetClaimFromStatus(claim))
}
func TestCalculateResources(t *testing.T) {
pool := &ResourcePool{
Status: ResourcePoolStatus{
Allocation: ResourcePoolQuotaStatus{
Hard: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("2"),
},
},
Claims: ResourcePoolNamespaceClaimsStatus{
"ns": {
&ResourcePoolClaimsItem{
Claims: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("1"),
},
},
},
},
},
}
pool.CalculateClaimedResources()
actualClaimed := pool.Status.Allocation.Claimed[corev1.ResourceLimitsCPU]
actualAvailable := pool.Status.Allocation.Available[corev1.ResourceLimitsCPU]
assert.Equal(t, 0, (&actualClaimed).Cmp(resource.MustParse("1")))
assert.Equal(t, 0, (&actualAvailable).Cmp(resource.MustParse("1")))
}
func TestCanClaimFromPool(t *testing.T) {
pool := &ResourcePool{
Status: ResourcePoolStatus{
Allocation: ResourcePoolQuotaStatus{
Hard: corev1.ResourceList{
corev1.ResourceLimitsMemory: resource.MustParse("1Gi"),
},
Claimed: corev1.ResourceList{
corev1.ResourceLimitsMemory: resource.MustParse("512Mi"),
},
},
},
}
errs := pool.CanClaimFromPool(corev1.ResourceList{
corev1.ResourceLimitsMemory: resource.MustParse("1Gi"),
})
assert.Len(t, errs, 1)
errs = pool.CanClaimFromPool(corev1.ResourceList{
corev1.ResourceLimitsMemory: resource.MustParse("500Mi"),
})
assert.Len(t, errs, 0)
}
func TestGetResourceQuotaHardResources(t *testing.T) {
pool := &ResourcePool{
Spec: ResourcePoolSpec{
Defaults: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("1"),
},
},
Status: ResourcePoolStatus{
Claims: ResourcePoolNamespaceClaimsStatus{
"ns": {
&ResourcePoolClaimsItem{
Claims: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("1"),
},
},
},
},
},
}
res := pool.GetResourceQuotaHardResources("ns")
actual := res[corev1.ResourceLimitsCPU]
assert.Equal(t, 0, (&actual).Cmp(resource.MustParse("2")))
}
func TestGetNamespaceClaims(t *testing.T) {
pool := &ResourcePool{
Status: ResourcePoolStatus{
Claims: ResourcePoolNamespaceClaimsStatus{
"ns": {
&ResourcePoolClaimsItem{
NamespacedRFC1123ObjectReferenceWithNamespaceWithUID: meta.NamespacedRFC1123ObjectReferenceWithNamespaceWithUID{UID: "uid1"},
Claims: corev1.ResourceList{
corev1.ResourceLimitsCPU: resource.MustParse("1"),
},
},
},
},
},
}
claims, res := pool.GetNamespaceClaims("ns")
assert.Contains(t, claims, "uid1")
actual := res[corev1.ResourceLimitsCPU]
assert.Equal(t, 0, (&actual).Cmp(resource.MustParse("1")))
}
func TestGetClaimedByNamespaceClaims(t *testing.T) {
pool := &ResourcePool{
Status: ResourcePoolStatus{
Claims: ResourcePoolNamespaceClaimsStatus{
"ns1": {
&ResourcePoolClaimsItem{
Claims: makeResourceList("1", "1Gi"),
},
},
},
},
}
result := pool.GetClaimedByNamespaceClaims()
actualCPU := result["ns1"][corev1.ResourceLimitsCPU]
actualMem := result["ns1"][corev1.ResourceLimitsMemory]
assert.Equal(t, 0, (&actualCPU).Cmp(resource.MustParse("1")))
assert.Equal(t, 0, (&actualMem).Cmp(resource.MustParse("1Gi")))
}
func TestIsBoundToResourcePool_2(t *testing.T) {
t.Run("bound to resource pool (Assigned=True)", func(t *testing.T) {
claim := &ResourcePoolClaim{
Status: ResourcePoolClaimStatus{
Conditions: meta.ConditionList{},
},
}
assert.Equal(t, false, claim.IsBoundInResourcePool())
})
t.Run("not bound - wrong condition type", func(t *testing.T) {
claim := &ResourcePoolClaim{
Status: ResourcePoolClaimStatus{
Conditions: meta.ConditionList{
meta.Condition{},
},
},
}
cond := meta.NewAssignedCondition(claim)
cond.Status = metav1.ConditionFalse
claim.Status.Conditions.UpdateConditionByType(cond)
assert.Equal(t, false, claim.IsBoundInResourcePool())
})
t.Run("not bound - condition not true", func(t *testing.T) {
claim := &ResourcePoolClaim{
Status: ResourcePoolClaimStatus{
Conditions: meta.ConditionList{
meta.Condition{},
},
},
}
cond := meta.NewBoundCondition(claim)
cond.Status = metav1.ConditionFalse
claim.Status.Conditions.UpdateConditionByType(cond)
assert.Equal(t, false, claim.IsBoundInResourcePool())
})
t.Run("not bound - condition not true", func(t *testing.T) {
claim := &ResourcePoolClaim{
Status: ResourcePoolClaimStatus{
Conditions: meta.ConditionList{
meta.Condition{},
},
},
}
cond := meta.NewBoundCondition(claim)
cond.Status = metav1.ConditionTrue
claim.Status.Conditions.UpdateConditionByType(cond)
assert.Equal(t, true, claim.IsBoundInResourcePool())
})
}

View File

@@ -1,70 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/projectcapsule/capsule/pkg/api"
"github.com/projectcapsule/capsule/pkg/api/meta"
)
// ResourcePoolStatus defines the observed state of ResourcePool.
type ResourcePoolStatus struct {
// How many namespaces are considered
// +kubebuilder:default=0
NamespaceSize uint `json:"namespaceCount,omitempty"`
// Amount of claims
// +kubebuilder:default=0
ClaimSize uint `json:"claimCount,omitempty"`
// Namespaces which are considered for claims
Namespaces []string `json:"namespaces,omitempty"`
// Tracks the claims against the pool, grouped by namespace.
// +optional
Claims ResourcePoolNamespaceClaimsStatus `json:"claims,omitzero"`
// Tracks the usage claimed against what has been granted from the pool
// +optional
Allocation ResourcePoolQuotaStatus `json:"allocation,omitzero"`
// Exhaustions from claims associated with the pool
Exhaustions map[string]api.PoolExhaustionResource `json:"exhaustions,omitempty"`
// Conditions for the resource claim
Conditions meta.ConditionList `json:"conditions,omitzero"`
}
type ResourcePoolNamespaceClaimsStatus map[string]ResourcePoolClaimsList
type ResourcePoolQuotaStatus struct {
// Hard is the set of enforced hard limits for each named resource.
// More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/
// +optional
Hard corev1.ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"`
// Used is the current observed total usage of the resource in the namespace.
// +optional
Claimed corev1.ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"`
// Used to track the usage of the resource in the pool (diff hard - claimed). May be used for further automation
// +optional
Available corev1.ResourceList `json:"available,omitempty" protobuf:"bytes,2,rep,name=available,casttype=ResourceList,castkey=ResourceName"`
}
type ResourcePoolClaimsList []*ResourcePoolClaimsItem
func (r *ResourcePoolClaimsList) GetClaimByUID(uid types.UID) *ResourcePoolClaimsItem {
for _, claim := range *r {
if claim.UID == uid {
return claim
}
}
return nil
}
// ResourcePoolClaimsItem is a single claim entry tracked in the ResourcePool status.
type ResourcePoolClaimsItem struct {
// Reference to the ResourcePoolClaim this entry was created from
meta.NamespacedRFC1123ObjectReferenceWithNamespaceWithUID `json:",inline"`
// Claimed resources
Claims corev1.ResourceList `json:"claims,omitempty"`
}

View File

@@ -1,87 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/projectcapsule/capsule/pkg/runtime/selectors"
)
// ResourcePoolSpec.
type ResourcePoolSpec struct {
// Selector to match the namespaces that should be managed by the ResourcePool
Selectors []selectors.NamespaceSelector `json:"selectors,omitempty"`
// Define the resourcequota served by this resourcepool.
Quota corev1.ResourceQuotaSpec `json:"quota"`
// The Defaults given for each namespace; the defaults are not counted towards the total allocation.
// When you use claims, it's recommended to provision Defaults as they prevent the scheduling of any resources.
// +optional
Defaults corev1.ResourceList `json:"defaults,omitzero"`
// Additional Configuration
//+kubebuilder:default:={}
// +optional
Config ResourcePoolSpecConfiguration `json:"config,omitzero"`
}
type ResourcePoolSpecConfiguration struct {
// With this option all resources which can be allocated are set to 0 for the resourcequota defaults. (Default false)
// +kubebuilder:default=false
DefaultsAssignZero *bool `json:"defaultsZero,omitempty"`
// Claims are queued whenever they are allocated to a pool. A pool tries to allocate claims in order based on their
// creation date. But no matter their creation time, if a claim requests too many resources it's put into the queue,
// while a lower-priority claim that still fits into the available resources will be able to claim them, even though
// its priority was lower.
// Enabling this option respects the order, meaning the creation timestamp matters: once a claim is put into the queue, no
// other claim with lower priority can claim the same resources. (Default false)
// +kubebuilder:default=false
OrderedQueue *bool `json:"orderedQueue,omitempty"`
// When a ResourcePool is deleted, the ResourceClaims bound to it are disassociated from the ResourcePool but not deleted.
// By enabling this option, the ResourceClaims will be deleted when the ResourcePool is deleted, if they are in a bound state. (Default false)
// +kubebuilder:default=false
DeleteBoundResources *bool `json:"deleteBoundResources,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Cluster,shortName=quotapool
// +kubebuilder:printcolumn:name="Claims",type="integer",JSONPath=".status.claimCount",description="The total amount of Claims bound"
// +kubebuilder:printcolumn:name="Namespaces",type="integer",JSONPath=".status.namespaceCount",description="The total amount of Namespaces considered"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="Reconcile Status"
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="Reconcile Message"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age"
// ResourcePools allow you to define a set of resources as known from ResourceQuotas. ResourcePools are defined at cluster scope and should
// be administered by cluster administrators. They create an interface where cluster administrators can define
// from which namespaces the resources of a ResourcePool can be claimed. The claiming is done via a namespaced CRD called ResourcePoolClaim. It is then
// up to the group of users within these namespaces to manage the resources they consume per namespace. Each ResourcePool provisions a ResourceQuota into all the selected namespaces. The ResourcePoolClaims, once assigned to the ResourcePool, stack resources on top of that
// ResourceQuota based on the namespace the ResourcePoolClaim was made from.
type ResourcePool struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ObjectMeta `json:"metadata,omitzero"`
Spec ResourcePoolSpec `json:"spec"`
// +optional
Status ResourcePoolStatus `json:"status,omitzero"`
}
// +kubebuilder:object:root=true
// ResourcePoolList contains a list of ResourcePool.
type ResourcePoolList struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ListMeta `json:"metadata,omitzero"`
Items []ResourcePool `json:"items"`
}
func init() {
SchemeBuilder.Register(&ResourcePool{}, &ResourcePoolList{})
}
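
A sketch of a minimal pool built from the types above (hypothetical name and values; the namespace selectors are omitted because their shape lives in pkg/runtime/selectors, and resource is assumed imported from k8s.io/apimachinery/pkg/api/resource):

func examplePool() ResourcePool {
	return ResourcePool{
		ObjectMeta: metav1.ObjectMeta{Name: "solar-pool"},
		Spec: ResourcePoolSpec{
			Quota: corev1.ResourceQuotaSpec{
				Hard: corev1.ResourceList{
					corev1.ResourceLimitsCPU:    resource.MustParse("8"),
					corev1.ResourceLimitsMemory: resource.MustParse("16Gi"),
				},
			},
			// Defaults are provisioned into each namespace but never count towards the allocation.
			Defaults: corev1.ResourceList{
				corev1.ResourceLimitsCPU: resource.MustParse("500m"),
			},
		},
	}
}

Each namespace selected by such a pool receives a ResourceQuota named capsule-pool-solar-pool, per the GetQuotaName helper shown earlier in this diff.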

View File

@@ -1,61 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/projectcapsule/capsule/pkg/api/meta"
)
// Indicates whether the claim is exhausted in the resource pool.
func (r *ResourcePoolClaim) IsExhaustedInResourcePool() bool {
condition := r.Status.Conditions.GetConditionByType(meta.ExhaustedCondition)
if condition == nil {
return false
}
if condition.Status == metav1.ConditionTrue {
return true
}
return false
}
func (r *ResourcePoolClaim) IsAssignedInResourcePool() bool {
condition := r.Status.Conditions.GetConditionByType(meta.AssignedCondition)
if condition == nil {
return false
}
if condition.Status == metav1.ConditionTrue {
return true
}
return false
}
func (r *ResourcePoolClaim) IsBoundInResourcePool() bool {
condition := r.Status.Conditions.GetConditionByType(meta.BoundCondition)
if condition == nil {
return false
}
if condition.Status == metav1.ConditionTrue {
return true
}
return false
}
func (r *ResourcePoolClaim) GetPool() string {
if name := string(r.Status.Pool.Name); name != "" {
return name
}
return r.Spec.Pool
}

View File

@@ -1,171 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
"testing"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/projectcapsule/capsule/pkg/api/meta"
)
func TestIsBoundToResourcePool(t *testing.T) {
tests := []struct {
name string
claim ResourcePoolClaim
expected bool
}{
{
name: "bound to resource pool (Assigned=True)",
claim: ResourcePoolClaim{
Status: ResourcePoolClaimStatus{
Conditions: meta.ConditionList{
meta.Condition{
Type: meta.BoundCondition,
Status: metav1.ConditionTrue,
Reason: meta.SucceededReason,
Message: "reconciled",
LastTransitionTime: metav1.Now(),
},
},
},
},
expected: true,
},
{
name: "not bound - wrong condition type",
claim: ResourcePoolClaim{
Status: ResourcePoolClaimStatus{
Conditions: meta.ConditionList{
meta.Condition{
Type: meta.AssignedCondition,
Status: metav1.ConditionTrue,
Reason: meta.SucceededReason,
Message: "reconciled",
LastTransitionTime: metav1.Now(),
},
},
},
},
expected: false,
},
{
name: "not bound - status not true",
claim: ResourcePoolClaim{
Status: ResourcePoolClaimStatus{
Conditions: meta.ConditionList{
meta.Condition{
Type: meta.BoundCondition,
Status: metav1.ConditionFalse,
Reason: meta.SucceededReason,
Message: "reconciled",
LastTransitionTime: metav1.Now(),
},
},
},
},
expected: false,
},
{
name: "not bound - empty condition",
claim: ResourcePoolClaim{
Status: ResourcePoolClaimStatus{},
},
expected: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actual := tt.claim.IsBoundInResourcePool()
assert.Equal(t, tt.expected, actual)
})
}
}
func TestGetPool(t *testing.T) {
tests := []struct {
name string
claim ResourcePoolClaim
expected string
}{
{
name: "returns status pool name when set",
claim: ResourcePoolClaim{
Spec: ResourcePoolClaimSpec{
Pool: "spec-pool",
},
Status: ResourcePoolClaimStatus{
Pool: meta.LocalRFC1123ObjectReferenceWithUID{
Name: meta.RFC1123Name("status-pool"),
},
},
},
expected: "status-pool",
},
{
name: "falls back to spec pool when status pool name is empty",
claim: ResourcePoolClaim{
Spec: ResourcePoolClaimSpec{
Pool: "spec-pool",
},
Status: ResourcePoolClaimStatus{
Pool: meta.LocalRFC1123ObjectReferenceWithUID{
Name: meta.RFC1123Name(""),
},
},
},
expected: "spec-pool",
},
{
name: "falls back to spec pool when status pool struct is zero-value",
claim: ResourcePoolClaim{
Spec: ResourcePoolClaimSpec{
Pool: "spec-pool",
},
Status: ResourcePoolClaimStatus{
Pool: meta.LocalRFC1123ObjectReferenceWithUID{},
},
},
expected: "spec-pool",
},
{
name: "returns empty when both status and spec are empty",
claim: ResourcePoolClaim{
Spec: ResourcePoolClaimSpec{
Pool: "",
},
Status: ResourcePoolClaimStatus{
Pool: meta.LocalRFC1123ObjectReferenceWithUID{
Name: meta.RFC1123Name(""),
},
},
},
expected: "",
},
{
name: "status wins even if spec differs",
claim: ResourcePoolClaim{
Spec: ResourcePoolClaimSpec{
Pool: "spec-pool",
},
Status: ResourcePoolClaimStatus{
Pool: meta.LocalRFC1123ObjectReferenceWithUID{
Name: meta.RFC1123Name("status-pool"),
},
},
},
expected: "status-pool",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actual := tt.claim.GetPool()
assert.Equal(t, tt.expected, actual)
})
}
}

View File

@@ -1,72 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/projectcapsule/capsule/pkg/api/meta"
)
type ResourcePoolClaimSpec struct {
// If there's the possibility to claim from multiple ResourcePools,
// you must be specific about which one you want to claim resources from.
// Once bound to a ResourcePool, this field is immutable
Pool string `json:"pool"`
// Amount which should be claimed for the resourcequota
ResourceClaims corev1.ResourceList `json:"claim"`
}
// ResourcePoolClaimStatus defines the observed state of ResourcePoolClaim.
type ResourcePoolClaimStatus struct {
// Reference to the ResourcePool being claimed from
// +optional
Pool meta.LocalRFC1123ObjectReferenceWithUID `json:"pool,omitzero"`
// Deprecated: Use Conditions
//
// +optional
Condition metav1.Condition `json:"condition,omitzero"`
// Conditions for the resource claim
Conditions meta.ConditionList `json:"conditions,omitzero"`
// Tracks the usage claimed by this claim and the available resources
// +optional
Allocation ResourcePoolQuotaStatus `json:"allocation,omitzero"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Pool",type="string",JSONPath=".status.pool.name",description="The ResourcePool being claimed from"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description="Ready Status"
// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description="Ready Message"
// +kubebuilder:printcolumn:name="Bound",type="string",JSONPath=".status.conditions[?(@.type==\"Bound\")].status",description="Bound Status"
// +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.conditions[?(@.type==\"Bound\")].message",description="Bound Message"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// ResourcePoolClaim is the Schema for the resourcepoolclaims API.
type ResourcePoolClaim struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ObjectMeta `json:"metadata,omitzero"`
Spec ResourcePoolClaimSpec `json:"spec"`
// +optional
Status ResourcePoolClaimStatus `json:"status,omitzero"`
}
// +kubebuilder:object:root=true
// ResourcePoolClaimList contains a list of ResourcePoolClaim.
type ResourcePoolClaimList struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ListMeta `json:"metadata,omitzero"`
Items []ResourcePoolClaim `json:"items"`
}
func init() {
SchemeBuilder.Register(&ResourcePoolClaim{}, &ResourcePoolClaimList{})
}
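
Taken together, a claim is constructed like any other typed API object. A minimal sketch with illustrative names and quantities, assuming the additional import "k8s.io/apimachinery/pkg/api/resource" for MustParse:

// Illustrative only: the pool name, namespace, and quantities are made up.
claim := &ResourcePoolClaim{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "compute-claim",
		Namespace: "team-a",
	},
	Spec: ResourcePoolClaimSpec{
		// Names the pool explicitly; immutable once the claim is bound.
		Pool: "shared-compute",
		// Resources this claim asks the pool to hand out.
		ResourceClaims: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("2"),
			corev1.ResourceMemory: resource.MustParse("4Gi"),
		},
	},
}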

View File

@@ -1,44 +0,0 @@
// Copyright 2020-2026 Project Capsule Authors
// SPDX-License-Identifier: Apache-2.0
package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +kubebuilder:object:root=true
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age"
type RuleStatus struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ObjectMeta `json:"metadata,omitzero"`
// +optional
Status RuleStatusSpec `json:"status,omitzero"`
}
// +kubebuilder:object:root=true
// RuleStatusList contains a list of RuleStatus.
type RuleStatusList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitzero"`
Items []RuleStatus `json:"items"`
}
func init() {
SchemeBuilder.Register(&RuleStatus{}, &RuleStatusList{})
}
// RuleStatusSpec contains the accumulated rules applying to the namespace it's deployed in.
// +kubebuilder:object:generate=true
type RuleStatusSpec struct {
// Managed Enforcement properties per Namespace (aggregated from rules)
	// +optional
Rule NamespaceRuleBody `json:"rule,omitzero"`
}
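
Since the aggregate lives in the status, a consumer simply reads the object and inspects Status.Rule. A minimal sketch, assuming a controller-runtime client ("sigs.k8s.io/controller-runtime/pkg/client"); the object name is hypothetical, as the naming convention is not shown in this diff:

// Sketch only: "capsule-rules" is a hypothetical object name.
var rs RuleStatus
if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: "team-a", Name: "capsule-rules"}, &rs); err != nil {
	return err
}

// Aggregated enforcement properties for this namespace.
rule := rs.Status.Rule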

View File

@@ -1,48 +1,17 @@
-// Copyright 2020-2026 Project Capsule Authors
+// Copyright 2020-2021 Clastix Labs
 // SPDX-License-Identifier: Apache-2.0
 
 package v1beta2
 
 import (
-	"crypto/md5" //#nosec
-	"encoding/hex"
 	"fmt"
 	"strings"
 )
 
-const (
-	// Annotation name part must be no more than 63 characters.
-	maxAnnotationLength = 63
-
-	HardCapsuleQuotaAnnotation = "quota.capsule.clastix.io/hard-"
-	UsedCapsuleQuotaAnnotation = "quota.capsule.clastix.io/used-"
-)
-
-func createAnnotation(format string, resource fmt.Stringer) (string, error) {
-	resourceStr := strings.ReplaceAll(resource.String(), "/", "_")
-
-	hash := md5.Sum([]byte(resourceStr)) //#nosec
-	hashed := hex.EncodeToString(hash[:])
-
-	capsuleHashed := format + hashed
-	capsuleAnnotation := format + resourceStr
-
-	switch {
-	case len(capsuleAnnotation) <= maxAnnotationLength:
-		return capsuleAnnotation, nil
-	case len(capsuleHashed) <= maxAnnotationLength:
-		return capsuleHashed, nil
-	case len(hashed) <= maxAnnotationLength:
-		return hashed, nil
-	default:
-		return "", fmt.Errorf("the annotation name would exceed the maximum supported length (%d), skipping", maxAnnotationLength)
-	}
-}
-
-func UsedQuotaFor(resource fmt.Stringer) (string, error) {
-	return createAnnotation(UsedCapsuleQuotaAnnotation, resource)
+func UsedQuotaFor(resource fmt.Stringer) string {
+	return "quota.capsule.clastix.io/used-" + strings.ReplaceAll(resource.String(), "/", "_")
 }
 
-func HardQuotaFor(resource fmt.Stringer) (string, error) {
-	return createAnnotation(HardCapsuleQuotaAnnotation, resource)
+func HardQuotaFor(resource fmt.Stringer) string {
+	return "quota.capsule.clastix.io/hard-" + strings.ReplaceAll(resource.String(), "/", "_")
 }
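
For context on the main-branch helper removed here: Kubernetes caps the name segment of an annotation key at 63 characters, and a long resource name pushes the plain concatenation over that limit, so createAnnotation falls back to a fixed-length md5 form. A standalone, runnable sketch of the same fallback logic, with illustrative resource names:

package main

import (
	"crypto/md5" //#nosec -- used only to shorten names, not a security boundary
	"encoding/hex"
	"fmt"
	"strings"
)

const maxAnnotationLength = 63

// shortAnnotation mirrors the removed createAnnotation helper: prefer the
// readable key and fall back to an md5-based key when it would be too long.
func shortAnnotation(prefix, resource string) (string, error) {
	resourceStr := strings.ReplaceAll(resource, "/", "_")
	hash := md5.Sum([]byte(resourceStr)) //#nosec
	hashed := hex.EncodeToString(hash[:]) // always 32 characters

	switch {
	case len(prefix+resourceStr) <= maxAnnotationLength:
		return prefix + resourceStr, nil
	case len(prefix+hashed) <= maxAnnotationLength:
		return prefix + hashed, nil
	case len(hashed) <= maxAnnotationLength:
		return hashed, nil
	}

	return "", fmt.Errorf("the annotation name would exceed the maximum supported length (%d)", maxAnnotationLength)
}

func main() {
	short, _ := shortAnnotation("quota.capsule.clastix.io/used-", "pods")
	long, _ := shortAnnotation("quota.capsule.clastix.io/used-", "requests.some-operator.example.com/custom-resource")

	fmt.Println(short) // readable form fits within the limit
	fmt.Println(long)  // too long as-is, so the md5 form is used
}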

Some files were not shown because too many files have changed in this diff.