Compare commits
187 Commits
v0.1.0-rc5
...
helm-v0.1.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
afae361627 | ||
|
|
535ef7412c | ||
|
|
f373debf54 | ||
|
|
569d803e95 | ||
|
|
7b3b0d6504 | ||
|
|
0bfca6b60e | ||
|
|
fdc1b3fe39 | ||
|
|
f7bc2e24cc | ||
|
|
d3021633cd | ||
|
|
7fefe4f6de | ||
|
|
302bb19707 | ||
|
|
27a7792c31 | ||
|
|
1a60e83772 | ||
|
|
632268dd68 | ||
|
|
4e07de37c4 | ||
|
|
1d10bcab1e | ||
|
|
d4a5f3beca | ||
|
|
cd56eab119 | ||
|
|
6cee5b73af | ||
|
|
8e7325aecb | ||
|
|
be26783424 | ||
|
|
0b199f4136 | ||
|
|
1bbaebbc90 | ||
|
|
4b8d8b2a7c | ||
|
|
3fb4c41daf | ||
|
|
055791966a | ||
|
|
c9af9c18e4 | ||
|
|
fef381d2b4 | ||
|
|
19aff8c882 | ||
|
|
8da7e22cb2 | ||
|
|
47c37a3d5d | ||
|
|
677175b3ed | ||
|
|
c95e3a2068 | ||
|
|
0be3be4480 | ||
|
|
6ad434fcfb | ||
|
|
e53911942d | ||
|
|
a179645f26 | ||
|
|
778fb4bcc2 | ||
|
|
bc23324fe7 | ||
|
|
4a6fd49554 | ||
|
|
d7baf18bf9 | ||
|
|
5c7804e1bf | ||
|
|
c4481f26f7 | ||
|
|
ec715d2e8f | ||
|
|
0aeaf89cb7 | ||
|
|
3d31ddb4e3 | ||
|
|
e83f344cdc | ||
|
|
da83a8711a | ||
|
|
43a944ace0 | ||
|
|
0acc2d2ef1 | ||
|
|
14f9686bbb | ||
|
|
6ba9826c51 | ||
|
|
bd58084ded | ||
|
|
3a5e50886d | ||
|
|
e2768dad83 | ||
|
|
b97c23176d | ||
|
|
fa8e805842 | ||
|
|
8df66fc232 | ||
|
|
c2218912eb | ||
|
|
e361e2d424 | ||
|
|
260b60d263 | ||
|
|
e0d5e6feb2 | ||
|
|
0784dc7177 | ||
|
|
b17c6c4636 | ||
|
|
52cf597041 | ||
|
|
b8dcded882 | ||
|
|
6a175e9017 | ||
|
|
3c609f84db | ||
|
|
7c3a59c4e4 | ||
|
|
d3e3b8a881 | ||
|
|
7a8148bd58 | ||
|
|
405d3ac52d | ||
|
|
f92acf9a9d | ||
|
|
bbb7b850d6 | ||
|
|
0f7284d190 | ||
|
|
7db263b2b6 | ||
|
|
0a8f50f761 | ||
|
|
7a66e8ea93 | ||
|
|
b5eb03ea76 | ||
|
|
681b514516 | ||
|
|
b28b98a7bc | ||
|
|
f6bf0ca446 | ||
|
|
1081bad7cb | ||
|
|
79372c7332 | ||
|
|
4e8faaf845 | ||
|
|
d1b008972c | ||
|
|
a14c7609df | ||
|
|
03456c0b54 | ||
|
|
ddfe2219a0 | ||
|
|
6b68363a46 | ||
|
|
357834c5b9 | ||
|
|
085d9f6503 | ||
|
|
196e3c910d | ||
|
|
0039c91c23 | ||
|
|
26965a5ea2 | ||
|
|
422b6598ba | ||
|
|
61e6ab4088 | ||
|
|
94c6a64fcb | ||
|
|
75ebb571e4 | ||
|
|
8f3b3eac29 | ||
|
|
7979c256d9 | ||
|
|
bdafbcf90a | ||
|
|
d0530bbbe3 | ||
|
|
1035afc7fe | ||
|
|
67046c5b54 | ||
|
|
564c4db81a | ||
|
|
30c3ab078d | ||
|
|
e9b803b9cd | ||
|
|
cb8e504832 | ||
|
|
713867d916 | ||
|
|
23e55c685c | ||
|
|
6393541818 | ||
|
|
c140ab076e | ||
|
|
6b629777b7 | ||
|
|
5554ed5f32 | ||
|
|
00ef9a2f67 | ||
|
|
46c2f0e997 | ||
|
|
0c0a90a934 | ||
|
|
9d65013a22 | ||
|
|
60ab33337d | ||
|
|
225d671301 | ||
|
|
7538926bae | ||
|
|
0de0eca72a | ||
|
|
d5a702ceae | ||
|
|
a2fda44110 | ||
|
|
06330cf992 | ||
|
|
1ec9936158 | ||
|
|
694b519af8 | ||
|
|
0b34f04291 | ||
|
|
a702ef2af2 | ||
|
|
04d91af9f5 | ||
|
|
8949be7497 | ||
|
|
df08c9e63e | ||
|
|
07daffd669 | ||
|
|
3a42b90221 | ||
|
|
09277e9f3d | ||
|
|
47794c0cf8 | ||
|
|
e24394f329 | ||
|
|
01053d5deb | ||
|
|
b749e34547 | ||
|
|
82480f3afd | ||
|
|
88a9c242a4 | ||
|
|
651c62ff4a | ||
|
|
dcb8b784d5 | ||
|
|
7a698633d7 | ||
|
|
894ea5016b | ||
|
|
e4e3283b90 | ||
|
|
007f0083c2 | ||
|
|
bc6fc920d3 | ||
|
|
01b511b509 | ||
|
|
6223b1c297 | ||
|
|
d5158f06be | ||
|
|
047f4a0ff7 | ||
|
|
71cdb45925 | ||
|
|
9182895811 | ||
|
|
2eceb0935a | ||
|
|
8ead555743 | ||
|
|
57bf3d1c1b | ||
|
|
bb58e90f5d | ||
|
|
f8fa87a998 | ||
|
|
b3658b7bfc | ||
|
|
54d0201161 | ||
|
|
44ffe0ddf5 | ||
|
|
491ab71842 | ||
|
|
4e9dbf8690 | ||
|
|
34614015a0 | ||
|
|
737fb26e39 | ||
|
|
b56015922f | ||
|
|
ddb9ffd79e | ||
|
|
cae65c9f84 | ||
|
|
befcf65bdd | ||
|
|
e1d98334a2 | ||
|
|
848c6d99c2 | ||
|
|
bd12068397 | ||
|
|
4604e44c37 | ||
|
|
31863b53af | ||
|
|
7a055fcb9f | ||
|
|
29ab5ca64a | ||
|
|
c52f7844db | ||
|
|
9244122d42 | ||
|
|
f883e7b662 | ||
|
|
2f5f31b678 | ||
|
|
e7ef9642ad | ||
|
|
34f73af5c4 | ||
|
|
18912a002b | ||
|
|
d43ad2f9f8 | ||
|
|
9a595877ce |
22
.github/workflows/ci.yml
vendored
@@ -7,6 +7,15 @@ on:
|
||||
branches: [ "*" ]
|
||||
|
||||
jobs:
|
||||
commit_lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: wagoid/commitlint-github-action@v2
|
||||
with:
|
||||
firstParent: true
|
||||
golangci:
|
||||
name: lint
|
||||
runs-on: ubuntu-latest
|
||||
@@ -25,18 +34,9 @@ jobs:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Cache Go modules
|
||||
uses: actions/cache@v1
|
||||
env:
|
||||
cache-name: go-mod
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
path: |
|
||||
~/go/pkg/mod
|
||||
/home/runner/work/capsule/capsule
|
||||
key: ${{ runner.os }}-build-${{ env.cache-name }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-build-
|
||||
${{ runner.os }}-
|
||||
go-version: '1.16'
|
||||
- run: make installer
|
||||
- name: Checking if YAML installer file is not aligned
|
||||
run: if [[ $(git diff | wc -l) -gt 0 ]]; then echo ">>> Untracked generated files have not been committed" && git --no-pager diff && exit 1; fi
|
||||
|
||||
50
.github/workflows/docker-ci.yml
vendored
@@ -10,12 +10,27 @@ jobs:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
|
||||
-
|
||||
name: Checkout
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
|
||||
-
|
||||
name: Docker meta
|
||||
- name: Generate build-args
|
||||
id: build-args
|
||||
run: |
|
||||
# Declare vars for internal use
|
||||
VERSION=$(git describe --abbrev=0 --tags)
|
||||
GIT_HEAD_COMMIT=$(git rev-parse --short HEAD)
|
||||
GIT_TAG_COMMIT=$(git rev-parse --short $VERSION)
|
||||
GIT_MODIFIED_1=$(git diff $GIT_HEAD_COMMIT $GIT_TAG_COMMIT --quiet && echo "" || echo ".dev")
|
||||
GIT_MODIFIED_2=$(git diff --quiet && echo "" || echo ".dirty")
|
||||
# Export to GH_ENV
|
||||
echo "GIT_LAST_TAG=$VERSION" >> $GITHUB_ENV
|
||||
echo "GIT_HEAD_COMMIT=$GIT_HEAD_COMMIT" >> $GITHUB_ENV
|
||||
echo "GIT_TAG_COMMIT=$GIT_TAG_COMMIT" >> $GITHUB_ENV
|
||||
echo "GIT_MODIFIED=$(echo "$GIT_MODIFIED_1""$GIT_MODIFIED_2")" >> $GITHUB_ENV
|
||||
echo "GIT_REPO=$(git config --get remote.origin.url)" >> $GITHUB_ENV
|
||||
echo "BUILD_DATE=$(git log -1 --format="%at" | xargs -I{} date -d @{} +%Y-%m-%dT%H:%M:%S)" >> $GITHUB_ENV
|
||||
|
||||
- name: Docker meta
|
||||
id: meta
|
||||
uses: docker/metadata-action@v3
|
||||
with:
|
||||
@@ -26,22 +41,19 @@ jobs:
|
||||
flavor: |
|
||||
latest=false
|
||||
|
||||
-
|
||||
name: Set up QEMU
|
||||
- name: Set up QEMU
|
||||
id: qemu
|
||||
uses: docker/setup-qemu-action@v1
|
||||
with:
|
||||
platforms: arm64,arm
|
||||
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
- name: Set up Docker Buildx
|
||||
id: buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
with:
|
||||
install: true
|
||||
uses: docker/setup-buildx-action@v1
|
||||
|
||||
-
|
||||
name: Inspect builder
|
||||
- name: Inspect builder
|
||||
run: |
|
||||
echo "Name: ${{ steps.buildx.outputs.name }}"
|
||||
echo "Endpoint: ${{ steps.buildx.outputs.endpoint }}"
|
||||
@@ -49,16 +61,14 @@ jobs:
|
||||
echo "Flags: ${{ steps.buildx.outputs.flags }}"
|
||||
echo "Platforms: ${{ steps.buildx.outputs.platforms }}"
|
||||
|
||||
-
|
||||
name: Login to quay.io Container Registry
|
||||
- name: Login to quay.io Container Registry
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
registry: quay.io
|
||||
username: ${{ github.repository_owner }}+github
|
||||
password: ${{ secrets.BOT_QUAY_IO }}
|
||||
|
||||
-
|
||||
name: Build and push
|
||||
- name: Build and push
|
||||
id: build-release
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
@@ -67,7 +77,13 @@ jobs:
|
||||
platforms: linux/amd64,linux/arm64,linux/arm
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
build-args: |
|
||||
GIT_HEAD_COMMIT=${{ env.GIT_HEAD_COMMIT }}
|
||||
GIT_TAG_COMMIT=${{ env.GIT_TAG_COMMIT }}
|
||||
GIT_REPO=${{ env.GIT_REPO }}
|
||||
GIT_LAST_TAG=${{ env.GIT_LAST_TAG }}
|
||||
GIT_MODIFIED=${{ env.GIT_MODIFIED }}
|
||||
BUILD_DATE=${{ env.BUILD_DATE }}
|
||||
|
||||
-
|
||||
name: Image digest
|
||||
- name: Image digest
|
||||
run: echo ${{ steps.build-release.outputs.digest }}
|
||||
|
||||
37
.github/workflows/e2e.yml
vendored
@@ -3,33 +3,41 @@ name: e2e
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
paths:
|
||||
- '.github/workflows/e2e.yml'
|
||||
- 'api/**'
|
||||
- 'controllers/**'
|
||||
- 'e2e/*'
|
||||
- 'Dockerfile'
|
||||
- 'go.*'
|
||||
- 'main.go'
|
||||
- 'Makefile'
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
paths:
|
||||
- '.github/workflows/e2e.yml'
|
||||
- 'api/**'
|
||||
- 'controllers/**'
|
||||
- 'e2e/*'
|
||||
- 'Dockerfile'
|
||||
- 'go.*'
|
||||
- 'main.go'
|
||||
- 'Makefile'
|
||||
|
||||
jobs:
|
||||
kind:
|
||||
name: Kubernetes
|
||||
strategy:
|
||||
matrix:
|
||||
k8s-version: ['v1.16.15', 'v1.17.11', 'v1.18.8', 'v1.19.4', 'v1.20.0']
|
||||
k8s-version: ['v1.16.15', 'v1.17.11', 'v1.18.8', 'v1.19.4', 'v1.20.7', 'v1.21.2', 'v1.22.4', 'v1.23.0']
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Cache Go modules and Docker images
|
||||
uses: actions/cache@v1
|
||||
env:
|
||||
cache-name: gomod-docker
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
path: |
|
||||
~/go/pkg/mod
|
||||
/var/lib/docker
|
||||
/home/runner/work/capsule/capsule
|
||||
key: ${{ matrix.k8s-version }}-build-${{ env.cache-name }}
|
||||
restore-keys: |
|
||||
${{ matrix.k8s-version }}-build-
|
||||
${{ matrix.k8s-version }}-
|
||||
go-version: '1.16'
|
||||
- run: make manifests
|
||||
- name: Checking if manifests are disaligned
|
||||
run: test -z "$(git diff 2> /dev/null)"
|
||||
@@ -39,10 +47,11 @@ jobs:
|
||||
run: go get github.com/onsi/ginkgo/ginkgo
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '^1.13.8'
|
||||
go-version: '1.16'
|
||||
- uses: engineerd/setup-kind@v0.5.0
|
||||
with:
|
||||
skipClusterCreation: true
|
||||
version: v0.11.1
|
||||
- uses: azure/setup-helm@v1
|
||||
with:
|
||||
version: 3.3.4
|
||||
|
||||
1
.github/workflows/helm.yml
vendored
@@ -3,6 +3,7 @@ name: Helm Chart
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
tags: [ "helm-v*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
|
||||
1
.gitignore
vendored
@@ -22,6 +22,7 @@ bin
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
.vscode
|
||||
|
||||
**/*.kubeconfig
|
||||
**/*.crt
|
||||
|
||||
56
Makefile
@@ -1,5 +1,5 @@
|
||||
# Current Operator version
|
||||
VERSION ?= $$(git describe --abbrev=0 --tags)
|
||||
VERSION ?= $$(git describe --abbrev=0 --tags --match "v*")
|
||||
|
||||
# Default bundle image tag
|
||||
BUNDLE_IMG ?= quay.io/clastix/capsule:$(VERSION)-bundle
|
||||
@@ -45,7 +45,7 @@ manager: generate fmt vet
|
||||
|
||||
# Run against the configured Kubernetes cluster in ~/.kube/config
|
||||
run: generate manifests
|
||||
go run ./main.go
|
||||
go run .
|
||||
|
||||
# Creates the single file to install Capsule without any external dependency
|
||||
installer: manifests kustomize
|
||||
@@ -78,6 +78,58 @@ manifests: controller-gen
|
||||
generate: controller-gen
|
||||
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
|
||||
|
||||
# Setup development env
|
||||
# Usage:
|
||||
# LAPTOP_HOST_IP=<YOUR_LAPTOP_IP> make dev-setup
|
||||
# For example:
|
||||
# LAPTOP_HOST_IP=192.168.10.101 make dev-setup
|
||||
define TLS_CNF
|
||||
[ req ]
|
||||
default_bits = 4096
|
||||
distinguished_name = req_distinguished_name
|
||||
req_extensions = req_ext
|
||||
[ req_distinguished_name ]
|
||||
countryName = SG
|
||||
stateOrProvinceName = SG
|
||||
localityName = SG
|
||||
organizationName = CAPSULE
|
||||
commonName = CAPSULE
|
||||
[ req_ext ]
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
IP.1 = $(LAPTOP_HOST_IP)
|
||||
endef
|
||||
export TLS_CNF
|
||||
dev-setup:
|
||||
kubectl -n capsule-system scale deployment capsule-controller-manager --replicas=0
|
||||
mkdir -p /tmp/k8s-webhook-server/serving-certs
|
||||
echo "$${TLS_CNF}" > _tls.cnf
|
||||
openssl req -newkey rsa:4096 -days 3650 -nodes -x509 \
|
||||
-subj "/C=SG/ST=SG/L=SG/O=CAPSULE/CN=CAPSULE" \
|
||||
-extensions req_ext \
|
||||
-config _tls.cnf \
|
||||
-keyout /tmp/k8s-webhook-server/serving-certs/tls.key \
|
||||
-out /tmp/k8s-webhook-server/serving-certs/tls.crt
|
||||
rm -f _tls.cnf
|
||||
export WEBHOOK_URL="https://$${LAPTOP_HOST_IP}:9443"; \
|
||||
export CA_BUNDLE=`openssl base64 -in /tmp/k8s-webhook-server/serving-certs/tls.crt | tr -d '\n'`; \
|
||||
kubectl patch MutatingWebhookConfiguration capsule-mutating-webhook-configuration \
|
||||
--type='json' -p="[\
|
||||
{'op': 'replace', 'path': '/webhooks/0/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/mutate-v1-namespace-owner-reference\",'caBundle':\"$${CA_BUNDLE}\"}}\
|
||||
]" && \
|
||||
kubectl patch ValidatingWebhookConfiguration capsule-validating-webhook-configuration \
|
||||
--type='json' -p="[\
|
||||
{'op': 'replace', 'path': '/webhooks/0/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/cordoning\",'caBundle':\"$${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/1/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/ingresses\",'caBundle':\"$${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/2/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/namespaces\",'caBundle':\"$${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/3/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/networkpolicies\",'caBundle':\"$${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/4/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/pods\",'caBundle':\"$${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/5/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/persistentvolumeclaims\",'caBundle':\"$${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/6/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/services\",'caBundle':\"$${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/7/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/tenants\",'caBundle':\"$${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/8/clientConfig', 'value':{'url':\"$${WEBHOOK_URL}/nodes\",'caBundle':\"$${CA_BUNDLE}\"}}\
|
||||
]";
|
||||
|
||||
# Build the docker image
|
||||
docker-build: test
|
||||
docker build . -t ${IMG} --build-arg GIT_HEAD_COMMIT=$(GIT_HEAD_COMMIT) \
|
||||
|
||||
156
README.md
@@ -13,180 +13,64 @@
|
||||
|
||||
---
|
||||
|
||||
# Kubernetes multi-tenancy made simple
|
||||
**Capsule** helps to implement a multi-tenancy and policy-based environment in your Kubernetes cluster. It is not intended to be yet another _PaaS_, instead, it has been designed as a micro-services-based ecosystem with the minimalist approach, leveraging only on upstream Kubernetes.
|
||||
# Kubernetes multi-tenancy made easy
|
||||
|
||||
**Capsule** implements a multi-tenant and policy-based environment in your Kubernetes cluster. It is designed as a micro-services-based ecosystem with the minimalist approach, leveraging only on upstream Kubernetes.
|
||||
|
||||
# What's the problem with the current status?
|
||||
|
||||
Kubernetes introduces the _Namespace_ object type to create logical partitions of the cluster as isolated *slices*. However, implementing advanced multi-tenancy scenarios, it soon becomes complicated because of the flat structure of Kubernetes namespaces and the impossibility to share resources among namespaces belonging to the same tenant. To overcome this, cluster admins tend to provision a dedicated cluster for each groups of users, teams, or departments. As an organization grows, the number of clusters to manage and keep aligned becomes an operational nightmare, described as the well know phenomena of the _clusters sprawl_.
|
||||
|
||||
|
||||
# Entering Capsule
|
||||
Capsule takes a different approach. In a single cluster, the Capsule Controller aggregates multiple namespaces in a lightweight abstraction called _Tenant_, basically a grouping of Kubernetes Namespaces. Within each tenant, users are free to create their namespaces and share all the assigned resources while the Capsule Policy Engine keeps the different tenants isolated from each other.
|
||||
|
||||
The _Network and Security Policies_, _Resource Quota_, _Limit Ranges_, _RBAC_, and other policies defined at the tenant level are automatically inherited by all the namespaces in the tenant. Then users are free to operate their tenants in autonomy, without the intervention of the cluster administrator. Take a look at following diagram:
|
||||
Capsule takes a different approach. In a single cluster, the Capsule Controller aggregates multiple namespaces in a lightweight abstraction called _Tenant_, basically a grouping of Kubernetes Namespaces. Within each tenant, users are free to create their namespaces and share all the assigned resources.
|
||||
|
||||
<p align="center" style="padding: 60px 20px">
|
||||
<img src="assets/capsule-operator.svg" />
|
||||
</p>
|
||||
On the other side, the Capsule Policy Engine keeps the different tenants isolated from each other. _Network and Security Policies_, _Resource Quota_, _Limit Ranges_, _RBAC_, and other policies defined at the tenant level are automatically inherited by all the namespaces in the tenant. Then users are free to operate their tenants in autonomy, without the intervention of the cluster administrator.
|
||||
|
||||
# Features
|
||||
|
||||
## Self-Service
|
||||
Leave to developers the freedom to self-provision their cluster resources according to the assigned boundaries.
|
||||
|
||||
Leave developers the freedom to self-provision their cluster resources according to the assigned boundaries.
|
||||
|
||||
## Preventing Clusters Sprawl
|
||||
|
||||
Share a single cluster with multiple teams, groups of users, or departments by saving operational and management efforts.
|
||||
|
||||
## Governance
|
||||
Leverage Kubernetes Admission Controllers to enforce the industry security best practices and meet legal requirements.
|
||||
|
||||
Leverage Kubernetes Admission Controllers to enforce the industry security best practices and meet policy requirements.
|
||||
|
||||
## Resources Control
|
||||
|
||||
Take control of the resources consumed by users while preventing them to overtake.
|
||||
|
||||
## Native Experience
|
||||
|
||||
Provide multi-tenancy with a native Kubernetes experience without introducing additional management layers, plugins, or customized binaries.
|
||||
|
||||
## GitOps ready
|
||||
|
||||
Capsule is completely declarative and GitOps ready.
|
||||
|
||||
## Bring your own device (BYOD)
|
||||
|
||||
Assign to tenants a dedicated set of compute, storage, and network resources and avoid the noisy neighbors' effect.
|
||||
|
||||
# Common use cases for Capsule
|
||||
Please, refer to the corresponding [section](./docs/operator/use-cases/overview.md) in the project documentation for a detailed list of common use cases that Capsule can address.
|
||||
|
||||
# Installation
|
||||
Make sure you have access to a Kubernetes cluster as administrator.
|
||||
|
||||
There are two ways to install Capsule:
|
||||
|
||||
* Use the Helm Chart available [here](./charts/capsule/README.md)
|
||||
* Use the [single YAML file installer](./config/install.yaml)
|
||||
|
||||
## Install with the single YAML file installer
|
||||
|
||||
Ensure you have `kubectl` installed in your `PATH`.
|
||||
|
||||
Clone this repository and move to the repo folder:
|
||||
|
||||
```
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/clastix/capsule/master/config/install.yaml
|
||||
namespace/capsule-system created
|
||||
customresourcedefinition.apiextensions.k8s.io/capsuleconfigurations.capsule.clastix.io created
|
||||
customresourcedefinition.apiextensions.k8s.io/tenants.capsule.clastix.io created
|
||||
clusterrolebinding.rbac.authorization.k8s.io/capsule-manager-rolebinding created
|
||||
secret/capsule-ca created
|
||||
secret/capsule-tls created
|
||||
service/capsule-controller-manager-metrics-service created
|
||||
service/capsule-webhook-service created
|
||||
deployment.apps/capsule-controller-manager created
|
||||
capsuleconfiguration.capsule.clastix.io/capsule-default created
|
||||
mutatingwebhookconfiguration.admissionregistration.k8s.io/capsule-mutating-webhook-configuration created
|
||||
validatingwebhookconfiguration.admissionregistration.k8s.io/capsule-validating-webhook-configuration created
|
||||
```
|
||||
|
||||
It will install the Capsule controller in a dedicated namespace `capsule-system`.
|
||||
|
||||
## How to create Tenants
|
||||
Use the scaffold [Tenant](config/samples/capsule_v1alpha1_tenant.yaml) and simply apply as cluster admin.
|
||||
|
||||
```
|
||||
$ kubectl apply -f config/samples/capsule_v1alpha1_tenant.yaml
|
||||
tenant.capsule.clastix.io/oil created
|
||||
```
|
||||
|
||||
You can check the tenant just created as
|
||||
|
||||
```
|
||||
$ kubectl get tenants
|
||||
NAME NAMESPACE QUOTA NAMESPACE COUNT OWNER NAME OWNER KIND NODE SELECTOR AGE
|
||||
oil 3 0 alice User 1m
|
||||
```
|
||||
|
||||
## Tenant owners
|
||||
Each tenant comes with a delegated user or group of users acting as the tenant admin. In the Capsule jargon, this is called the _Tenant Owner_. Other users can operate inside a tenant with different levels of permissions and authorizations assigned directly by the Tenant Owner.
|
||||
|
||||
Capsule does not care about the authentication strategy used in the cluster and all the Kubernetes methods of [authentication](https://kubernetes.io/docs/reference/access-authn-authz/authentication/) are supported. The only requirement to use Capsule is to assign tenant users to the the group defined by `--capsule-user-group` option, which defaults to `capsule.clastix.io`.
|
||||
|
||||
Assignment to a group depends on the authentication strategy in your cluster.
|
||||
|
||||
For example, if you are using `capsule.clastix.io`, users authenticated through a _X.509_ certificate must have `capsule.clastix.io` as _Organization_: `-subj "/CN=${USER}/O=capsule.clastix.io"`
|
||||
|
||||
Users authenticated through an _OIDC token_ must have
|
||||
|
||||
```json
|
||||
...
|
||||
"users_groups": [
|
||||
"capsule.clastix.io",
|
||||
"other_group"
|
||||
]
|
||||
```
|
||||
|
||||
in their token.
|
||||
|
||||
The [hack/create-user.sh](hack/create-user.sh) can help you set up a dummy `kubeconfig` for the `alice` user acting as owner of a tenant called `oil`
|
||||
|
||||
```bash
|
||||
./hack/create-user.sh alice oil
|
||||
creating certs in TMPDIR /tmp/tmp.4CLgpuime3
|
||||
Generating RSA private key, 2048 bit long modulus (2 primes)
|
||||
............+++++
|
||||
........................+++++
|
||||
e is 65537 (0x010001)
|
||||
certificatesigningrequest.certificates.k8s.io/alice-oil created
|
||||
certificatesigningrequest.certificates.k8s.io/alice-oil approved
|
||||
kubeconfig file is: alice-oil.kubeconfig
|
||||
to use it as alice export KUBECONFIG=alice-oil.kubeconfig
|
||||
```
|
||||
|
||||
## Working with Tenants
|
||||
Log in to the Kubernetes cluster as `alice` tenant owner
|
||||
|
||||
```
|
||||
$ export KUBECONFIG=alice-oil.kubeconfig
|
||||
```
|
||||
|
||||
and create a couple of new namespaces
|
||||
|
||||
```
|
||||
$ kubectl create namespace oil-production
|
||||
$ kubectl create namespace oil-development
|
||||
```
|
||||
|
||||
As user `alice` you can operate with fully admin permissions:
|
||||
|
||||
```
|
||||
$ kubectl -n oil-development run nginx --image=docker.io/nginx
|
||||
$ kubectl -n oil-development get pods
|
||||
```
|
||||
|
||||
but limited to only your own namespaces:
|
||||
|
||||
```
|
||||
$ kubectl -n kube-system get pods
|
||||
Error from server (Forbidden): pods is forbidden:
|
||||
User "alice" cannot list resource "pods" in API group "" in the namespace "kube-system"
|
||||
```
|
||||
|
||||
# Documentation
|
||||
Please, check the project [documentation](./docs/index.md) for more cool things you can do with Capsule.
|
||||
|
||||
# Removal
|
||||
Similar to `deploy`, you can get rid of Capsule using the `remove` target.
|
||||
Please, check the project [documentation](https://capsule.clastix.io) for the cool things you can do with Capsule.
|
||||
|
||||
```
|
||||
$ make remove
|
||||
```
|
||||
# Contributions
|
||||
|
||||
Capsule is Open Source with Apache 2 license and any contribution is welcome.
|
||||
|
||||
# FAQ
|
||||
|
||||
- Q. How to pronounce Capsule?
|
||||
|
||||
A. It should be pronounced as `/ˈkæpsjuːl/`.
|
||||
|
||||
- Q. Can I contribute?
|
||||
|
||||
A. Absolutely! Capsule is Open Source with Apache 2 license and any contribution is welcome. Please refer to the corresponding [section](./docs/operator/contributing.md) in the documentation.
|
||||
|
||||
- Q. Is it production grade?
|
||||
|
||||
A. Although under frequent development and improvements, Capsule is ready to be used in production environments as currently, people are using it in public and private deployments. Check out the [release](https://github.com/clastix/capsule/releases) page for a detailed list of available versions.
|
||||
|
||||
12
api/v1alpha1/capsuleconfiguration_annotations.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package v1alpha1
|
||||
|
||||
const (
|
||||
ForbiddenNodeLabelsAnnotation = "capsule.clastix.io/forbidden-node-labels"
|
||||
ForbiddenNodeLabelsRegexpAnnotation = "capsule.clastix.io/forbidden-node-labels-regexp"
|
||||
ForbiddenNodeAnnotationsAnnotation = "capsule.clastix.io/forbidden-node-annotations"
|
||||
ForbiddenNodeAnnotationsRegexpAnnotation = "capsule.clastix.io/forbidden-node-annotations-regexp"
|
||||
CASecretNameAnnotation = "capsule.clastix.io/ca-secret-name"
|
||||
TLSSecretNameAnnotation = "capsule.clastix.io/tls-secret-name"
|
||||
MutatingWebhookConfigurationName = "capsule.clastix.io/mutating-webhook-configuration-name"
|
||||
ValidatingWebhookConfigurationName = "capsule.clastix.io/validating-webhook-configuration-name"
|
||||
)
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
)
|
||||
|
||||
// CapsuleConfigurationSpec defines the Capsule configuration
|
||||
// nolint:maligned
|
||||
type CapsuleConfigurationSpec struct {
|
||||
// Names of the groups for Capsule users.
|
||||
// +kubebuilder:default={capsule.clastix.io}
|
||||
@@ -19,15 +18,6 @@ type CapsuleConfigurationSpec struct {
|
||||
ForceTenantPrefix bool `json:"forceTenantPrefix,omitempty"`
|
||||
// Disallow creation of namespaces, whose name matches this regexp
|
||||
ProtectedNamespaceRegexpString string `json:"protectedNamespaceRegex,omitempty"`
|
||||
// When defining the exact match for allowed Ingress hostnames at Tenant level, a collision is not allowed.
|
||||
// Toggling this, Capsule will not check if a hostname collision is in place, allowing the creation of
|
||||
// two or more Tenant resources although sharing the same allowed hostname(s).
|
||||
//
|
||||
// The JSON path of the resource is: /spec/ingressHostnames/allowed
|
||||
AllowTenantIngressHostnamesCollision bool `json:"allowTenantIngressHostnamesCollision,omitempty"`
|
||||
// Allow the collision of Ingress resource hostnames across all the Tenants.
|
||||
// +kubebuilder:default=true
|
||||
AllowIngressHostnameCollision bool `json:"allowIngressHostnameCollision,omitempty"`
|
||||
}
|
||||
|
||||
// +kubebuilder:object:root=true
|
||||
|
||||
@@ -17,6 +17,8 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
resourceQuotaScopeAnnotation = "capsule.clastix.io/resource-quota-scope"
|
||||
|
||||
podAllowedImagePullPolicyAnnotation = "capsule.clastix.io/allowed-image-pull-policy"
|
||||
|
||||
podPriorityAllowedAnnotation = "priorityclass.capsule.clastix.io/allowed"
|
||||
@@ -24,6 +26,7 @@ const (
|
||||
|
||||
enableNodePortsAnnotation = "capsule.clastix.io/enable-node-ports"
|
||||
enableExternalNameAnnotation = "capsule.clastix.io/enable-external-name"
|
||||
enableLoadBalancerAnnotation = "capsule.clastix.io/enable-loadbalancer-service"
|
||||
|
||||
ownerGroupsAnnotation = "owners.capsule.clastix.io/group"
|
||||
ownerUsersAnnotation = "owners.capsule.clastix.io/user"
|
||||
@@ -41,6 +44,8 @@ const (
|
||||
enablePriorityClassListingAnnotation = "capsule.clastix.io/enable-priorityclass-listing"
|
||||
enablePriorityClassUpdateAnnotation = "capsule.clastix.io/enable-priorityclass-update"
|
||||
enablePriorityClassDeletionAnnotation = "capsule.clastix.io/enable-priorityclass-deletion"
|
||||
|
||||
ingressHostnameCollisionScope = "ingress.capsule.clastix.io/hostname-collision-scope"
|
||||
)
|
||||
|
||||
func (t *Tenant) convertV1Alpha1OwnerToV1Beta1() capsulev1beta1.OwnerListSpec {
|
||||
@@ -132,23 +137,32 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
|
||||
dst.ObjectMeta = t.ObjectMeta
|
||||
|
||||
// Spec
|
||||
dst.Spec.NamespaceQuota = t.Spec.NamespaceQuota
|
||||
if t.Spec.NamespaceQuota != nil {
|
||||
if dst.Spec.NamespaceOptions == nil {
|
||||
dst.Spec.NamespaceOptions = &capsulev1beta1.NamespaceOptions{}
|
||||
}
|
||||
dst.Spec.NamespaceOptions.Quota = t.Spec.NamespaceQuota
|
||||
}
|
||||
|
||||
dst.Spec.NodeSelector = t.Spec.NodeSelector
|
||||
|
||||
dst.Spec.Owners = t.convertV1Alpha1OwnerToV1Beta1()
|
||||
|
||||
if t.Spec.NamespacesMetadata != nil {
|
||||
dst.Spec.NamespacesMetadata = &capsulev1beta1.AdditionalMetadataSpec{
|
||||
AdditionalLabels: t.Spec.NamespacesMetadata.AdditionalLabels,
|
||||
AdditionalAnnotations: t.Spec.NamespacesMetadata.AdditionalAnnotations,
|
||||
if dst.Spec.NamespaceOptions == nil {
|
||||
dst.Spec.NamespaceOptions = &capsulev1beta1.NamespaceOptions{}
|
||||
}
|
||||
dst.Spec.NamespaceOptions.AdditionalMetadata = &capsulev1beta1.AdditionalMetadataSpec{
|
||||
Labels: t.Spec.NamespacesMetadata.AdditionalLabels,
|
||||
Annotations: t.Spec.NamespacesMetadata.AdditionalAnnotations,
|
||||
}
|
||||
}
|
||||
if t.Spec.ServicesMetadata != nil {
|
||||
if dst.Spec.ServiceOptions == nil {
|
||||
dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{
|
||||
AdditionalMetadata: &capsulev1beta1.AdditionalMetadataSpec{
|
||||
AdditionalLabels: t.Spec.ServicesMetadata.AdditionalLabels,
|
||||
AdditionalAnnotations: t.Spec.ServicesMetadata.AdditionalAnnotations,
|
||||
Labels: t.Spec.ServicesMetadata.AdditionalLabels,
|
||||
Annotations: t.Spec.ServicesMetadata.AdditionalAnnotations,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -159,14 +173,22 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
|
||||
Regex: t.Spec.StorageClasses.Regex,
|
||||
}
|
||||
}
|
||||
if v, ok := t.Annotations[ingressHostnameCollisionScope]; ok {
|
||||
switch v {
|
||||
case string(capsulev1beta1.HostnameCollisionScopeCluster), string(capsulev1beta1.HostnameCollisionScopeTenant), string(capsulev1beta1.HostnameCollisionScopeNamespace):
|
||||
dst.Spec.IngressOptions.HostnameCollisionScope = capsulev1beta1.HostnameCollisionScope(v)
|
||||
default:
|
||||
dst.Spec.IngressOptions.HostnameCollisionScope = capsulev1beta1.HostnameCollisionScopeDisabled
|
||||
}
|
||||
}
|
||||
if t.Spec.IngressClasses != nil {
|
||||
dst.Spec.IngressClasses = &capsulev1beta1.AllowedListSpec{
|
||||
dst.Spec.IngressOptions.AllowedClasses = &capsulev1beta1.AllowedListSpec{
|
||||
Exact: t.Spec.IngressClasses.Exact,
|
||||
Regex: t.Spec.IngressClasses.Regex,
|
||||
}
|
||||
}
|
||||
if t.Spec.IngressHostnames != nil {
|
||||
dst.Spec.IngressHostnames = &capsulev1beta1.AllowedListSpec{
|
||||
dst.Spec.IngressOptions.AllowedHostnames = &capsulev1beta1.AllowedListSpec{
|
||||
Exact: t.Spec.IngressHostnames.Exact,
|
||||
Regex: t.Spec.IngressHostnames.Regex,
|
||||
}
|
||||
@@ -178,17 +200,28 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
|
||||
}
|
||||
}
|
||||
if len(t.Spec.NetworkPolicies) > 0 {
|
||||
dst.Spec.NetworkPolicies = &capsulev1beta1.NetworkPolicySpec{
|
||||
dst.Spec.NetworkPolicies = capsulev1beta1.NetworkPolicySpec{
|
||||
Items: t.Spec.NetworkPolicies,
|
||||
}
|
||||
}
|
||||
if len(t.Spec.LimitRanges) > 0 {
|
||||
dst.Spec.LimitRanges = &capsulev1beta1.LimitRangesSpec{
|
||||
dst.Spec.LimitRanges = capsulev1beta1.LimitRangesSpec{
|
||||
Items: t.Spec.LimitRanges,
|
||||
}
|
||||
}
|
||||
if len(t.Spec.ResourceQuota) > 0 {
|
||||
dst.Spec.ResourceQuota = &capsulev1beta1.ResourceQuotaSpec{
|
||||
dst.Spec.ResourceQuota = capsulev1beta1.ResourceQuotaSpec{
|
||||
Scope: func() capsulev1beta1.ResourceQuotaScope {
|
||||
if v, ok := t.GetAnnotations()[resourceQuotaScopeAnnotation]; ok {
|
||||
switch v {
|
||||
case string(capsulev1beta1.ResourceQuotaScopeNamespace):
|
||||
return capsulev1beta1.ResourceQuotaScopeNamespace
|
||||
case string(capsulev1beta1.ResourceQuotaScopeTenant):
|
||||
return capsulev1beta1.ResourceQuotaScopeTenant
|
||||
}
|
||||
}
|
||||
return capsulev1beta1.ResourceQuotaScopeTenant
|
||||
}(),
|
||||
Items: t.Spec.ResourceQuota,
|
||||
}
|
||||
}
|
||||
@@ -201,12 +234,15 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
|
||||
}
|
||||
}
|
||||
if t.Spec.ExternalServiceIPs != nil {
|
||||
dst.Spec.ExternalServiceIPs = &capsulev1beta1.ExternalServiceIPsSpec{
|
||||
if dst.Spec.ServiceOptions == nil {
|
||||
dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{}
|
||||
}
|
||||
dst.Spec.ServiceOptions.ExternalServiceIPs = &capsulev1beta1.ExternalServiceIPsSpec{
|
||||
Allowed: make([]capsulev1beta1.AllowedIP, len(t.Spec.ExternalServiceIPs.Allowed)),
|
||||
}
|
||||
|
||||
for i, IP := range t.Spec.ExternalServiceIPs.Allowed {
|
||||
dst.Spec.ExternalServiceIPs.Allowed[i] = capsulev1beta1.AllowedIP(IP)
|
||||
dst.Spec.ServiceOptions.ExternalServiceIPs.Allowed[i] = capsulev1beta1.AllowedIP(IP)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -262,6 +298,21 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
|
||||
dst.Spec.ServiceOptions.AllowedServices.ExternalName = pointer.BoolPtr(val)
|
||||
}
|
||||
|
||||
loadBalancerService, ok := annotations[enableLoadBalancerAnnotation]
|
||||
if ok {
|
||||
val, err := strconv.ParseBool(loadBalancerService)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, fmt.Sprintf("unable to parse %s annotation on tenant %s", enableLoadBalancerAnnotation, t.GetName()))
|
||||
}
|
||||
if dst.Spec.ServiceOptions == nil {
|
||||
dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{}
|
||||
}
|
||||
if dst.Spec.ServiceOptions.AllowedServices == nil {
|
||||
dst.Spec.ServiceOptions.AllowedServices = &capsulev1beta1.AllowedServices{}
|
||||
}
|
||||
dst.Spec.ServiceOptions.AllowedServices.LoadBalancer = pointer.BoolPtr(val)
|
||||
}
|
||||
|
||||
// Status
|
||||
dst.Status = capsulev1beta1.TenantStatus{
|
||||
Size: t.Status.Size,
|
||||
@@ -274,6 +325,7 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
|
||||
delete(dst.ObjectMeta.Annotations, podPriorityAllowedRegexAnnotation)
|
||||
delete(dst.ObjectMeta.Annotations, enableNodePortsAnnotation)
|
||||
delete(dst.ObjectMeta.Annotations, enableExternalNameAnnotation)
|
||||
delete(dst.ObjectMeta.Annotations, enableLoadBalancerAnnotation)
|
||||
delete(dst.ObjectMeta.Annotations, ownerGroupsAnnotation)
|
||||
delete(dst.ObjectMeta.Annotations, ownerUsersAnnotation)
|
||||
delete(dst.ObjectMeta.Annotations, ownerServiceAccountAnnotation)
|
||||
@@ -289,6 +341,8 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
|
||||
delete(dst.ObjectMeta.Annotations, enablePriorityClassListingAnnotation)
|
||||
delete(dst.ObjectMeta.Annotations, enablePriorityClassUpdateAnnotation)
|
||||
delete(dst.ObjectMeta.Annotations, enablePriorityClassDeletionAnnotation)
|
||||
delete(dst.ObjectMeta.Annotations, resourceQuotaScopeAnnotation)
|
||||
delete(dst.ObjectMeta.Annotations, ingressHostnameCollisionScope)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -397,7 +451,10 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
|
||||
t.ObjectMeta = src.ObjectMeta
|
||||
|
||||
// Spec
|
||||
t.Spec.NamespaceQuota = src.Spec.NamespaceQuota
|
||||
if src.Spec.NamespaceOptions != nil && src.Spec.NamespaceOptions.Quota != nil {
|
||||
t.Spec.NamespaceQuota = src.Spec.NamespaceOptions.Quota
|
||||
}
|
||||
|
||||
t.Spec.NodeSelector = src.Spec.NodeSelector
|
||||
|
||||
if t.Annotations == nil {
|
||||
@@ -406,16 +463,16 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
|
||||
|
||||
t.convertV1Beta1OwnerToV1Alpha1(src)
|
||||
|
||||
if src.Spec.NamespacesMetadata != nil {
|
||||
if src.Spec.NamespaceOptions != nil && src.Spec.NamespaceOptions.AdditionalMetadata != nil {
|
||||
t.Spec.NamespacesMetadata = &AdditionalMetadataSpec{
|
||||
AdditionalLabels: src.Spec.NamespacesMetadata.AdditionalLabels,
|
||||
AdditionalAnnotations: src.Spec.NamespacesMetadata.AdditionalAnnotations,
|
||||
AdditionalLabels: src.Spec.NamespaceOptions.AdditionalMetadata.Labels,
|
||||
AdditionalAnnotations: src.Spec.NamespaceOptions.AdditionalMetadata.Annotations,
|
||||
}
|
||||
}
|
||||
if src.Spec.ServiceOptions != nil && src.Spec.ServiceOptions.AdditionalMetadata != nil {
|
||||
t.Spec.ServicesMetadata = &AdditionalMetadataSpec{
|
||||
AdditionalLabels: src.Spec.ServiceOptions.AdditionalMetadata.AdditionalLabels,
|
||||
AdditionalAnnotations: src.Spec.ServiceOptions.AdditionalMetadata.AdditionalAnnotations,
|
||||
AdditionalLabels: src.Spec.ServiceOptions.AdditionalMetadata.Labels,
|
||||
AdditionalAnnotations: src.Spec.ServiceOptions.AdditionalMetadata.Annotations,
|
||||
}
|
||||
}
|
||||
if src.Spec.StorageClasses != nil {
|
||||
@@ -424,16 +481,17 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
|
||||
Regex: src.Spec.StorageClasses.Regex,
|
||||
}
|
||||
}
|
||||
if src.Spec.IngressClasses != nil {
|
||||
t.Annotations[ingressHostnameCollisionScope] = string(src.Spec.IngressOptions.HostnameCollisionScope)
|
||||
if src.Spec.IngressOptions.AllowedClasses != nil {
|
||||
t.Spec.IngressClasses = &AllowedListSpec{
|
||||
Exact: src.Spec.IngressClasses.Exact,
|
||||
Regex: src.Spec.IngressClasses.Regex,
|
||||
Exact: src.Spec.IngressOptions.AllowedClasses.Exact,
|
||||
Regex: src.Spec.IngressOptions.AllowedClasses.Regex,
|
||||
}
|
||||
}
|
||||
if src.Spec.IngressHostnames != nil {
|
||||
if src.Spec.IngressOptions.AllowedHostnames != nil {
|
||||
t.Spec.IngressHostnames = &AllowedListSpec{
|
||||
Exact: src.Spec.IngressHostnames.Exact,
|
||||
Regex: src.Spec.IngressHostnames.Regex,
|
||||
Exact: src.Spec.IngressOptions.AllowedHostnames.Exact,
|
||||
Regex: src.Spec.IngressOptions.AllowedHostnames.Regex,
|
||||
}
|
||||
}
|
||||
if src.Spec.ContainerRegistries != nil {
|
||||
@@ -442,13 +500,14 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
|
||||
Regex: src.Spec.ContainerRegistries.Regex,
|
||||
}
|
||||
}
|
||||
if src.Spec.NetworkPolicies != nil {
|
||||
if len(src.Spec.NetworkPolicies.Items) > 0 {
|
||||
t.Spec.NetworkPolicies = src.Spec.NetworkPolicies.Items
|
||||
}
|
||||
if src.Spec.LimitRanges != nil {
|
||||
if len(src.Spec.LimitRanges.Items) > 0 {
|
||||
t.Spec.LimitRanges = src.Spec.LimitRanges.Items
|
||||
}
|
||||
if src.Spec.ResourceQuota != nil {
|
||||
if len(src.Spec.ResourceQuota.Items) > 0 {
|
||||
t.Annotations[resourceQuotaScopeAnnotation] = string(src.Spec.ResourceQuota.Scope)
|
||||
t.Spec.ResourceQuota = src.Spec.ResourceQuota.Items
|
||||
}
|
||||
if len(src.Spec.AdditionalRoleBindings) > 0 {
|
||||
@@ -459,12 +518,12 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
|
||||
})
|
||||
}
|
||||
}
|
||||
if src.Spec.ExternalServiceIPs != nil {
|
||||
if src.Spec.ServiceOptions != nil && src.Spec.ServiceOptions.ExternalServiceIPs != nil {
|
||||
t.Spec.ExternalServiceIPs = &ExternalServiceIPsSpec{
|
||||
Allowed: make([]AllowedIP, len(src.Spec.ExternalServiceIPs.Allowed)),
|
||||
Allowed: make([]AllowedIP, len(src.Spec.ServiceOptions.ExternalServiceIPs.Allowed)),
|
||||
}
|
||||
|
||||
for i, IP := range src.Spec.ExternalServiceIPs.Allowed {
|
||||
for i, IP := range src.Spec.ServiceOptions.ExternalServiceIPs.Allowed {
|
||||
t.Spec.ExternalServiceIPs.Allowed[i] = AllowedIP(IP)
|
||||
}
|
||||
}
|
||||
@@ -486,8 +545,15 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
|
||||
}
|
||||
|
||||
if src.Spec.ServiceOptions != nil && src.Spec.ServiceOptions.AllowedServices != nil {
|
||||
t.Annotations[enableNodePortsAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.NodePort)
|
||||
t.Annotations[enableExternalNameAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.ExternalName)
|
||||
if src.Spec.ServiceOptions.AllowedServices.NodePort != nil {
|
||||
t.Annotations[enableNodePortsAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.NodePort)
|
||||
}
|
||||
if src.Spec.ServiceOptions.AllowedServices.ExternalName != nil {
|
||||
t.Annotations[enableExternalNameAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.ExternalName)
|
||||
}
|
||||
if src.Spec.ServiceOptions.AllowedServices.LoadBalancer != nil {
|
||||
t.Annotations[enableLoadBalancerAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.LoadBalancer)
|
||||
}
|
||||
}
|
||||
|
||||
// Status
|
||||
|
||||
@@ -36,18 +36,26 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
|
||||
Regex: "^foo*",
|
||||
}
|
||||
var v1beta1AdditionalMetadataSpec = &capsulev1beta1.AdditionalMetadataSpec{
|
||||
AdditionalLabels: map[string]string{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
AdditionalAnnotations: map[string]string{
|
||||
Annotations: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
var v1beta1NamespaceOptions = &capsulev1beta1.NamespaceOptions{
|
||||
Quota: &namespaceQuota,
|
||||
AdditionalMetadata: v1beta1AdditionalMetadataSpec,
|
||||
}
|
||||
var v1beta1ServiceOptions = &capsulev1beta1.ServiceOptions{
|
||||
AdditionalMetadata: v1beta1AdditionalMetadataSpec,
|
||||
AllowedServices: &capsulev1beta1.AllowedServices{
|
||||
NodePort: pointer.BoolPtr(false),
|
||||
ExternalName: pointer.BoolPtr(false),
|
||||
LoadBalancer: pointer.BoolPtr(false),
|
||||
},
|
||||
ExternalServiceIPs: &capsulev1beta1.ExternalServiceIPsSpec{
|
||||
Allowed: []capsulev1beta1.AllowedIP{"192.168.0.1"},
|
||||
},
|
||||
}
|
||||
var v1beta1AllowedListSpec = &capsulev1beta1.AllowedListSpec{
|
||||
@@ -222,21 +230,24 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
|
||||
},
|
||||
},
|
||||
},
|
||||
NamespaceQuota: &namespaceQuota,
|
||||
NamespacesMetadata: v1beta1AdditionalMetadataSpec,
|
||||
ServiceOptions: v1beta1ServiceOptions,
|
||||
StorageClasses: v1beta1AllowedListSpec,
|
||||
IngressClasses: v1beta1AllowedListSpec,
|
||||
IngressHostnames: v1beta1AllowedListSpec,
|
||||
NamespaceOptions: v1beta1NamespaceOptions,
|
||||
ServiceOptions: v1beta1ServiceOptions,
|
||||
StorageClasses: v1beta1AllowedListSpec,
|
||||
IngressOptions: capsulev1beta1.IngressOptions{
|
||||
HostnameCollisionScope: capsulev1beta1.HostnameCollisionScopeDisabled,
|
||||
AllowedClasses: v1beta1AllowedListSpec,
|
||||
AllowedHostnames: v1beta1AllowedListSpec,
|
||||
},
|
||||
ContainerRegistries: v1beta1AllowedListSpec,
|
||||
NodeSelector: nodeSelector,
|
||||
NetworkPolicies: &capsulev1beta1.NetworkPolicySpec{
|
||||
NetworkPolicies: capsulev1beta1.NetworkPolicySpec{
|
||||
Items: networkPolicies,
|
||||
},
|
||||
LimitRanges: &capsulev1beta1.LimitRangesSpec{
|
||||
LimitRanges: capsulev1beta1.LimitRangesSpec{
|
||||
Items: limitRanges,
|
||||
},
|
||||
ResourceQuota: &capsulev1beta1.ResourceQuotaSpec{
|
||||
ResourceQuota: capsulev1beta1.ResourceQuotaSpec{
|
||||
Scope: capsulev1beta1.ResourceQuotaScopeNamespace,
|
||||
Items: resourceQuotas,
|
||||
},
|
||||
AdditionalRoleBindings: []capsulev1beta1.AdditionalRoleBindingsSpec{
|
||||
@@ -251,9 +262,6 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
|
||||
},
|
||||
},
|
||||
},
|
||||
ExternalServiceIPs: &capsulev1beta1.ExternalServiceIPsSpec{
|
||||
Allowed: []capsulev1beta1.AllowedIP{"192.168.0.1"},
|
||||
},
|
||||
ImagePullPolicies: []capsulev1beta1.ImagePullPolicySpec{"Always", "IfNotPresent"},
|
||||
PriorityClasses: &capsulev1beta1.AllowedListSpec{
|
||||
Exact: []string{"default"},
|
||||
@@ -278,6 +286,7 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
|
||||
podAllowedImagePullPolicyAnnotation: "Always,IfNotPresent",
|
||||
enableExternalNameAnnotation: "false",
|
||||
enableNodePortsAnnotation: "false",
|
||||
enableLoadBalancerAnnotation: "false",
|
||||
podPriorityAllowedAnnotation: "default",
|
||||
podPriorityAllowedRegexAnnotation: "^tier-.*$",
|
||||
ownerGroupsAnnotation: "owner-foo,owner-bar",
|
||||
@@ -292,6 +301,8 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
|
||||
enableIngressClassUpdateAnnotation: "alice,bob",
|
||||
enableIngressClassDeletionAnnotation: "alice,jack",
|
||||
enablePriorityClassListingAnnotation: "jack",
|
||||
resourceQuotaScopeAnnotation: "Namespace",
|
||||
ingressHostnameCollisionScope: "Disabled",
|
||||
},
|
||||
},
|
||||
Spec: TenantSpec{
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
|
||||
@@ -4,6 +4,6 @@
|
||||
package v1beta1
|
||||
|
||||
type AdditionalMetadataSpec struct {
|
||||
AdditionalLabels map[string]string `json:"additionalLabels,omitempty"`
|
||||
AdditionalAnnotations map[string]string `json:"additionalAnnotations,omitempty"`
|
||||
Labels map[string]string `json:"labels,omitempty"`
|
||||
Annotations map[string]string `json:"annotations,omitempty"`
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
//nolint:dupl
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
//nolint:dupl
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
|
||||
47
api/v1beta1/custom_resource_quota.go
Normal file
@@ -0,0 +1,47 @@
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
const (
|
||||
ResourceQuotaAnnotationPrefix = "quota.resources.capsule.clastix.io"
|
||||
ResourceUsedAnnotationPrefix = "used.resources.capsule.clastix.io"
|
||||
)
|
||||
|
||||
func UsedAnnotationForResource(kindGroup string) string {
|
||||
return fmt.Sprintf("%s/%s", ResourceUsedAnnotationPrefix, kindGroup)
|
||||
}
|
||||
|
||||
func LimitAnnotationForResource(kindGroup string) string {
|
||||
return fmt.Sprintf("%s/%s", ResourceQuotaAnnotationPrefix, kindGroup)
|
||||
}
|
||||
|
||||
func GetUsedResourceFromTenant(tenant Tenant, kindGroup string) (int64, error) {
|
||||
usedStr, ok := tenant.GetAnnotations()[UsedAnnotationForResource(kindGroup)]
|
||||
if !ok {
|
||||
usedStr = "0"
|
||||
}
|
||||
|
||||
used, _ := strconv.ParseInt(usedStr, 10, 10)
|
||||
|
||||
return used, nil
|
||||
}
|
||||
|
||||
func GetLimitResourceFromTenant(tenant Tenant, kindGroup string) (int64, error) {
|
||||
limitStr, ok := tenant.GetAnnotations()[LimitAnnotationForResource(kindGroup)]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("resource %s is not limited for the current tenant", kindGroup)
|
||||
}
|
||||
|
||||
limit, err := strconv.ParseInt(limitStr, 10, 10)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("resource %s limit cannot be parsed, %w", kindGroup, err)
|
||||
}
|
||||
|
||||
return limit, nil
|
||||
}
|
||||
15
api/v1beta1/deny_wildcard.go
Normal file
@@ -0,0 +1,15 @@
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1beta1
|
||||
|
||||
const (
|
||||
denyWildcard = "capsule.clastix.io/deny-wildcard"
|
||||
)
|
||||
|
||||
func (t *Tenant) IsWildcardDenied() bool {
|
||||
if v, ok := t.Annotations[denyWildcard]; ok && v == "true" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
33
api/v1beta1/forbidden_list.go
Normal file
@@ -0,0 +1,33 @@
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//nolint:dupl
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type ForbiddenListSpec struct {
|
||||
Exact []string `json:"denied,omitempty"`
|
||||
Regex string `json:"deniedRegex,omitempty"`
|
||||
}
|
||||
|
||||
func (in *ForbiddenListSpec) ExactMatch(value string) (ok bool) {
|
||||
if len(in.Exact) > 0 {
|
||||
sort.SliceStable(in.Exact, func(i, j int) bool {
|
||||
return strings.ToLower(in.Exact[i]) < strings.ToLower(in.Exact[j])
|
||||
})
|
||||
i := sort.SearchStrings(in.Exact, value)
|
||||
ok = i < len(in.Exact) && in.Exact[i] == value
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (in ForbiddenListSpec) RegexMatch(value string) (ok bool) {
|
||||
if len(in.Regex) > 0 {
|
||||
ok = regexp.MustCompile(in.Regex).MatchString(value)
|
||||
}
|
||||
return
|
||||
}
|
||||
67
api/v1beta1/forbidden_list_test.go
Normal file
@@ -0,0 +1,67 @@
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
//nolint:dupl
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestForbiddenListSpec_ExactMatch(t *testing.T) {
|
||||
type tc struct {
|
||||
In []string
|
||||
True []string
|
||||
False []string
|
||||
}
|
||||
for _, tc := range []tc{
|
||||
{
|
||||
[]string{"foo", "bar", "bizz", "buzz"},
|
||||
[]string{"foo", "bar", "bizz", "buzz"},
|
||||
[]string{"bing", "bong"},
|
||||
},
|
||||
{
|
||||
[]string{"one", "two", "three"},
|
||||
[]string{"one", "two", "three"},
|
||||
[]string{"a", "b", "c"},
|
||||
},
|
||||
{
|
||||
nil,
|
||||
nil,
|
||||
[]string{"any", "value"},
|
||||
},
|
||||
} {
|
||||
a := ForbiddenListSpec{
|
||||
Exact: tc.In,
|
||||
}
|
||||
for _, ok := range tc.True {
|
||||
assert.True(t, a.ExactMatch(ok))
|
||||
}
|
||||
for _, ko := range tc.False {
|
||||
assert.False(t, a.ExactMatch(ko))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestForbiddenListSpec_RegexMatch(t *testing.T) {
|
||||
type tc struct {
|
||||
Regex string
|
||||
True []string
|
||||
False []string
|
||||
}
|
||||
for _, tc := range []tc{
|
||||
{`first-\w+-pattern`, []string{"first-date-pattern", "first-year-pattern"}, []string{"broken", "first-year", "second-date-pattern"}},
|
||||
{``, nil, []string{"any", "value"}},
|
||||
} {
|
||||
a := ForbiddenListSpec{
|
||||
Regex: tc.Regex,
|
||||
}
|
||||
for _, ok := range tc.True {
|
||||
assert.True(t, a.RegexMatch(ok))
|
||||
}
|
||||
for _, ko := range tc.False {
|
||||
assert.False(t, a.RegexMatch(ko))
|
||||
}
|
||||
}
|
||||
}
|
||||
14
api/v1beta1/hostname_collision_scope.go
Normal file
@@ -0,0 +1,14 @@
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1beta1
|
||||
|
||||
const (
|
||||
HostnameCollisionScopeCluster HostnameCollisionScope = "Cluster"
|
||||
HostnameCollisionScopeTenant HostnameCollisionScope = "Tenant"
|
||||
HostnameCollisionScopeNamespace HostnameCollisionScope = "Namespace"
|
||||
HostnameCollisionScopeDisabled HostnameCollisionScope = "Disabled"
|
||||
)
|
||||
|
||||
// +kubebuilder:validation:Enum=Cluster;Tenant;Namespace;Disabled
|
||||
type HostnameCollisionScope string
|
||||
24
api/v1beta1/ingress_options.go
Normal file
@@ -0,0 +1,24 @@
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1beta1
|
||||
|
||||
type IngressOptions struct {
|
||||
// Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
|
||||
AllowedClasses *AllowedListSpec `json:"allowedClasses,omitempty"`
|
||||
// Defines the scope of hostname collision check performed when Tenant Owners create Ingress with allowed hostnames.
|
||||
//
|
||||
//
|
||||
// - Cluster: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces managed by Capsule.
|
||||
//
|
||||
// - Tenant: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces of the Tenant.
|
||||
//
|
||||
// - Namespace: disallow the creation of an Ingress if the pair hostname and path is already used in the Ingress Namespace.
|
||||
//
|
||||
//
|
||||
// Optional.
|
||||
// +kubebuilder:default=Disabled
|
||||
HostnameCollisionScope HostnameCollisionScope `json:"hostnameCollisionScope,omitempty"`
|
||||
// Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
|
||||
AllowedHostnames *AllowedListSpec `json:"allowedHostnames,omitempty"`
|
||||
}
|
||||
51
api/v1beta1/namespace_options.go
Normal file
@@ -0,0 +1,51 @@
|
||||
package v1beta1
|
||||
|
||||
import "strings"
|
||||
|
||||
type NamespaceOptions struct {
|
||||
//+kubebuilder:validation:Minimum=1
|
||||
// Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
|
||||
Quota *int32 `json:"quota,omitempty"`
|
||||
// Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
|
||||
AdditionalMetadata *AdditionalMetadataSpec `json:"additionalMetadata,omitempty"`
|
||||
}
|
||||
|
||||
func (t *Tenant) hasForbiddenNamespaceLabelsAnnotations() bool {
|
||||
if _, ok := t.Annotations[ForbiddenNamespaceLabelsAnnotation]; ok {
|
||||
return true
|
||||
}
|
||||
if _, ok := t.Annotations[ForbiddenNamespaceLabelsRegexpAnnotation]; ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (t *Tenant) hasForbiddenNamespaceAnnotationsAnnotations() bool {
|
||||
if _, ok := t.Annotations[ForbiddenNamespaceAnnotationsAnnotation]; ok {
|
||||
return true
|
||||
}
|
||||
if _, ok := t.Annotations[ForbiddenNamespaceAnnotationsRegexpAnnotation]; ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (t *Tenant) ForbiddenUserNamespaceLabels() *ForbiddenListSpec {
|
||||
if !t.hasForbiddenNamespaceLabelsAnnotations() {
|
||||
return nil
|
||||
}
|
||||
return &ForbiddenListSpec{
|
||||
Exact: strings.Split(t.Annotations[ForbiddenNamespaceLabelsAnnotation], ","),
|
||||
Regex: t.Annotations[ForbiddenNamespaceLabelsRegexpAnnotation],
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Tenant) ForbiddenUserNamespaceAnnotations() *ForbiddenListSpec {
|
||||
if !t.hasForbiddenNamespaceAnnotationsAnnotations() {
|
||||
return nil
|
||||
}
|
||||
return &ForbiddenListSpec{
|
||||
Exact: strings.Split(t.Annotations[ForbiddenNamespaceAnnotationsAnnotation], ","),
|
||||
Regex: t.Annotations[ForbiddenNamespaceAnnotationsRegexpAnnotation],
|
||||
}
|
||||
}
|
||||
@@ -5,6 +5,17 @@ package v1beta1
|
||||
|
||||
import corev1 "k8s.io/api/core/v1"
|
||||
|
||||
// +kubebuilder:validation:Enum=Tenant;Namespace
|
||||
type ResourceQuotaScope string
|
||||
|
||||
const (
|
||||
ResourceQuotaScopeTenant ResourceQuotaScope = "Tenant"
|
||||
ResourceQuotaScopeNamespace ResourceQuotaScope = "Namespace"
|
||||
)
|
||||
|
||||
type ResourceQuotaSpec struct {
|
||||
// +kubebuilder:default=Tenant
|
||||
// Define if the Resource Budget should compute resource across all Namespaces in the Tenant or individually per cluster. Default is Tenant
|
||||
Scope ResourceQuotaScope `json:"scope,omitempty"`
|
||||
Items []corev1.ResourceQuotaSpec `json:"items,omitempty"`
|
||||
}
|
||||
|
||||
16
api/v1beta1/service_allowed_types.go
Normal file
@@ -0,0 +1,16 @@
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package v1beta1
|
||||
|
||||
type AllowedServices struct {
|
||||
//+kubebuilder:default=true
|
||||
// Specifies if NodePort service type resources are allowed for the Tenant. Default is true. Optional.
|
||||
NodePort *bool `json:"nodePort,omitempty"`
|
||||
//+kubebuilder:default=true
|
||||
// Specifies if ExternalName service type resources are allowed for the Tenant. Default is true. Optional.
|
||||
ExternalName *bool `json:"externalName,omitempty"`
|
||||
//+kubebuilder:default=true
|
||||
// Specifies if LoadBalancer service type resources are allowed for the Tenant. Default is true. Optional.
|
||||
LoadBalancer *bool `json:"loadBalancer,omitempty"`
|
||||
}
|
||||
@@ -8,13 +8,6 @@ type ServiceOptions struct {
    AdditionalMetadata *AdditionalMetadataSpec `json:"additionalMetadata,omitempty"`
    // Block or deny certain type of Services. Optional.
    AllowedServices *AllowedServices `json:"allowedServices,omitempty"`
}

type AllowedServices struct {
    //+kubebuilder:default=true
    // Specifies if NodePort service type resources are allowed for the Tenant. Default is true. Optional.
    NodePort *bool `json:"nodePort,omitempty"`
    //+kubebuilder:default=true
    // Specifies if ExternalName service type resources are allowed for the Tenant. Default is true. Optional.
    ExternalName *bool `json:"externalName,omitempty"`
    // Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means no IPs are allowed. Optional.
    ExternalServiceIPs *ExternalServiceIPsSpec `json:"externalIPs,omitempty"`
}
@@ -5,21 +5,26 @@ package v1beta1

import (
    "fmt"
    "strings"
)

const (
    AvailableIngressClassesAnnotation = "capsule.clastix.io/ingress-classes"
    AvailableIngressClassesRegexpAnnotation = "capsule.clastix.io/ingress-classes-regexp"
    AvailableStorageClassesAnnotation = "capsule.clastix.io/storage-classes"
    AvailableStorageClassesRegexpAnnotation = "capsule.clastix.io/storage-classes-regexp"
    AllowedRegistriesAnnotation = "capsule.clastix.io/allowed-registries"
    AllowedRegistriesRegexpAnnotation = "capsule.clastix.io/allowed-registries-regexp"
    AvailableIngressClassesAnnotation = "capsule.clastix.io/ingress-classes"
    AvailableIngressClassesRegexpAnnotation = "capsule.clastix.io/ingress-classes-regexp"
    AvailableStorageClassesAnnotation = "capsule.clastix.io/storage-classes"
    AvailableStorageClassesRegexpAnnotation = "capsule.clastix.io/storage-classes-regexp"
    AllowedRegistriesAnnotation = "capsule.clastix.io/allowed-registries"
    AllowedRegistriesRegexpAnnotation = "capsule.clastix.io/allowed-registries-regexp"
    ForbiddenNamespaceLabelsAnnotation = "capsule.clastix.io/forbidden-namespace-labels"
    ForbiddenNamespaceLabelsRegexpAnnotation = "capsule.clastix.io/forbidden-namespace-labels-regexp"
    ForbiddenNamespaceAnnotationsAnnotation = "capsule.clastix.io/forbidden-namespace-annotations"
    ForbiddenNamespaceAnnotationsRegexpAnnotation = "capsule.clastix.io/forbidden-namespace-annotations-regexp"
)

func UsedQuotaFor(resource fmt.Stringer) string {
    return "quota.capsule.clastix.io/used-" + resource.String()
    return "quota.capsule.clastix.io/used-" + strings.ReplaceAll(resource.String(), "/", "_")
}

func HardQuotaFor(resource fmt.Stringer) string {
    return "quota.capsule.clastix.io/hard-" + resource.String()
    return "quota.capsule.clastix.io/hard-" + strings.ReplaceAll(resource.String(), "/", "_")
}
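The only functional change here is the "/" to "_" substitution: a quota on an extended resource whose name contains a slash would otherwise yield an annotation key with two slashes, which is not a valid Kubernetes annotation name. A sketch of the resulting Namespace annotations (the resource name and counts are hypothetical):

metadata:
  annotations:
    quota.capsule.clastix.io/used-requests.example.com_dongle: "2"
    quota.capsule.clastix.io/hard-requests.example.com_dongle: "5"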
@@ -18,10 +18,10 @@ func (t *Tenant) IsCordoned() bool {

func (t *Tenant) IsFull() bool {
    // we don't have limits on assigned Namespaces
    if t.Spec.NamespaceQuota == nil {
    if t.Spec.NamespaceOptions == nil || t.Spec.NamespaceOptions.Quota == nil {
        return false
    }
    return len(t.Status.Namespaces) >= int(*t.Spec.NamespaceQuota)
    return len(t.Status.Namespaces) >= int(*t.Spec.NamespaceOptions.Quota)
}

func (t *Tenant) AssignNamespaces(namespaces []corev1.Namespace) {
@@ -3,18 +3,18 @@

package v1beta1

// +kubebuilder:validation:Enum=cordoned;active
// +kubebuilder:validation:Enum=Cordoned;Active
type tenantState string

const (
    TenantStateActive tenantState = "active"
    TenantStateCordoned tenantState = "cordoned"
    TenantStateActive tenantState = "Active"
    TenantStateCordoned tenantState = "Cordoned"
)

// Returns the observed state of the Tenant
type TenantStatus struct {
    //+kubebuilder:default=active
    // The operational state of the Tenant. Possible values are "active", "cordoned".
    //+kubebuilder:default=Active
    // The operational state of the Tenant. Possible values are "Active", "Cordoned".
    State tenantState `json:"state"`
    // How many namespaces are assigned to the Tenant.
    Size uint `json:"size"`
@@ -11,37 +11,29 @@ import (
type TenantSpec struct {
    // Specifies the owners of the Tenant. Mandatory.
    Owners OwnerListSpec `json:"owners"`

    //+kubebuilder:validation:Minimum=1
    // Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
    NamespaceQuota *int32 `json:"namespaceQuota,omitempty"`
    // Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
    NamespacesMetadata *AdditionalMetadataSpec `json:"namespacesMetadata,omitempty"`
    // Specifies options for the Namespaces, such as additional metadata or maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
    NamespaceOptions *NamespaceOptions `json:"namespaceOptions,omitempty"`
    // Specifies options for the Service, such as additional metadata or block of certain type of Services. Optional.
    ServiceOptions *ServiceOptions `json:"serviceOptions,omitempty"`
    // Specifies the allowed StorageClasses assigned to the Tenant. Capsule assures that all PersistentVolumeClaim resources created in the Tenant can use only one of the allowed StorageClasses. Optional.
    StorageClasses *AllowedListSpec `json:"storageClasses,omitempty"`
    // Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
    IngressClasses *AllowedListSpec `json:"ingressClasses,omitempty"`
    // Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
    IngressHostnames *AllowedListSpec `json:"ingressHostnames,omitempty"`
    // Specifies options for the Ingress resources, such as allowed hostnames and IngressClass. Optional.
    IngressOptions IngressOptions `json:"ingressOptions,omitempty"`
    // Specifies the trusted Image Registries assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed trusted registries. Optional.
    ContainerRegistries *AllowedListSpec `json:"containerRegistries,omitempty"`
    // Specifies the label to control the placement of pods on a given pool of worker nodes. All namesapces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
    // Specifies the label to control the placement of pods on a given pool of worker nodes. All namespaces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
    NodeSelector map[string]string `json:"nodeSelector,omitempty"`
    // Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
    NetworkPolicies *NetworkPolicySpec `json:"networkPolicies,omitempty"`
    // Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
    LimitRanges *LimitRangesSpec `json:"limitRanges,omitempty"`
    NetworkPolicies NetworkPolicySpec `json:"networkPolicies,omitempty"`
    // Specifies the resource min/max usage restrictions to the Tenant. The assigned values are inherited by any namespace created in the Tenant. Optional.
    LimitRanges LimitRangesSpec `json:"limitRanges,omitempty"`
    // Specifies a list of ResourceQuota resources assigned to the Tenant. The assigned values are inherited by any namespace created in the Tenant. The Capsule operator aggregates ResourceQuota at Tenant level, so that the hard quota is never crossed for the given Tenant. This permits the Tenant owner to consume resources in the Tenant regardless of the namespace. Optional.
    ResourceQuota *ResourceQuotaSpec `json:"resourceQuotas,omitempty"`
    ResourceQuota ResourceQuotaSpec `json:"resourceQuotas,omitempty"`
    // Specifies additional RoleBindings assigned to the Tenant. Capsule will ensure that all namespaces in the Tenant always contain the RoleBinding for the given ClusterRole. Optional.
    AdditionalRoleBindings []AdditionalRoleBindingsSpec `json:"additionalRoleBindings,omitempty"`
    // Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means all the IPs are allowed. Optional.
    ExternalServiceIPs *ExternalServiceIPsSpec `json:"externalServiceIPs,omitempty"`
    // Specify the allowed values for the imagePullPolicies option in Pod resources. Capsule assures that all Pod resources created in the Tenant can use only one of the allowed policy. Optional.
    ImagePullPolicies []ImagePullPolicySpec `json:"imagePullPolicies,omitempty"`
    // Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
    // Specifies the allowed priorityClasses assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed PriorityClasses. Optional.
    PriorityClasses *AllowedListSpec `json:"priorityClasses,omitempty"`
}
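Put together, the flat fields give way to nested option blocks. A sketch of a v1beta1 Tenant using the relocated fields (owner shape and all values are illustrative; field names follow the CRD shown further down):

apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  namespaceOptions:                  # replaces namespaceQuota / namespacesMetadata
    quota: 3
    additionalMetadata:
      labels:
        env: production
  ingressOptions:                    # replaces ingressClasses / ingressHostnames
    allowedClasses:
      allowed: ["nginx"]
    allowedHostnames:
      allowedRegex: '.*\.oil\.acme\.com'
  serviceOptions:
    externalIPs:                     # replaces the top-level externalServiceIPs
      allowed: ["192.168.0.0/16"]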
@@ -50,7 +42,7 @@ type TenantSpec struct {
//+kubebuilder:storageversion
// +kubebuilder:resource:scope=Cluster,shortName=tnt
// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state",description="The actual state of the Tenant"
// +kubebuilder:printcolumn:name="Namespace quota",type="integer",JSONPath=".spec.namespaceQuota",description="The max amount of Namespaces can be created"
// +kubebuilder:printcolumn:name="Namespace quota",type="integer",JSONPath=".spec.namespaceOptions.quota",description="The max amount of Namespaces can be created"
// +kubebuilder:printcolumn:name="Namespace count",type="integer",JSONPath=".status.size",description="The total amount of Namespaces in use"
// +kubebuilder:printcolumn:name="Node selector",type="string",JSONPath=".spec.nodeSelector",description="Node Selector applied to Pods"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age"
@@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

// Copyright 2020-2021 Clastix Labs
@@ -17,15 +18,15 @@ import (
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AdditionalMetadataSpec) DeepCopyInto(out *AdditionalMetadataSpec) {
|
||||
*out = *in
|
||||
if in.AdditionalLabels != nil {
|
||||
in, out := &in.AdditionalLabels, &out.AdditionalLabels
|
||||
if in.Labels != nil {
|
||||
in, out := &in.Labels, &out.Labels
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.AdditionalAnnotations != nil {
|
||||
in, out := &in.AdditionalAnnotations, &out.AdditionalAnnotations
|
||||
if in.Annotations != nil {
|
||||
in, out := &in.Annotations, &out.Annotations
|
||||
*out = make(map[string]string, len(*in))
|
||||
for key, val := range *in {
|
||||
(*out)[key] = val
|
||||
@@ -96,6 +97,11 @@ func (in *AllowedServices) DeepCopyInto(out *AllowedServices) {
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
if in.LoadBalancer != nil {
|
||||
in, out := &in.LoadBalancer, &out.LoadBalancer
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedServices.
|
||||
@@ -149,6 +155,51 @@ func (in *ExternalServiceIPsSpec) DeepCopy() *ExternalServiceIPsSpec {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ForbiddenListSpec) DeepCopyInto(out *ForbiddenListSpec) {
|
||||
*out = *in
|
||||
if in.Exact != nil {
|
||||
in, out := &in.Exact, &out.Exact
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForbiddenListSpec.
|
||||
func (in *ForbiddenListSpec) DeepCopy() *ForbiddenListSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ForbiddenListSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IngressOptions) DeepCopyInto(out *IngressOptions) {
|
||||
*out = *in
|
||||
if in.AllowedClasses != nil {
|
||||
in, out := &in.AllowedClasses, &out.AllowedClasses
|
||||
*out = new(AllowedListSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.AllowedHostnames != nil {
|
||||
in, out := &in.AllowedHostnames, &out.AllowedHostnames
|
||||
*out = new(AllowedListSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressOptions.
|
||||
func (in *IngressOptions) DeepCopy() *IngressOptions {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IngressOptions)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *LimitRangesSpec) DeepCopyInto(out *LimitRangesSpec) {
|
||||
*out = *in
|
||||
@@ -171,6 +222,31 @@ func (in *LimitRangesSpec) DeepCopy() *LimitRangesSpec {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NamespaceOptions) DeepCopyInto(out *NamespaceOptions) {
|
||||
*out = *in
|
||||
if in.Quota != nil {
|
||||
in, out := &in.Quota, &out.Quota
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.AdditionalMetadata != nil {
|
||||
in, out := &in.AdditionalMetadata, &out.AdditionalMetadata
|
||||
*out = new(AdditionalMetadataSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceOptions.
|
||||
func (in *NamespaceOptions) DeepCopy() *NamespaceOptions {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NamespaceOptions)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) {
|
||||
*out = *in
|
||||
@@ -291,6 +367,11 @@ func (in *ServiceOptions) DeepCopyInto(out *ServiceOptions) {
|
||||
*out = new(AllowedServices)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ExternalServiceIPs != nil {
|
||||
in, out := &in.ExternalServiceIPs, &out.ExternalServiceIPs
|
||||
*out = new(ExternalServiceIPsSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceOptions.
|
||||
@@ -372,14 +453,9 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.NamespaceQuota != nil {
|
||||
in, out := &in.NamespaceQuota, &out.NamespaceQuota
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
if in.NamespacesMetadata != nil {
|
||||
in, out := &in.NamespacesMetadata, &out.NamespacesMetadata
|
||||
*out = new(AdditionalMetadataSpec)
|
||||
if in.NamespaceOptions != nil {
|
||||
in, out := &in.NamespaceOptions, &out.NamespaceOptions
|
||||
*out = new(NamespaceOptions)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ServiceOptions != nil {
|
||||
@@ -392,16 +468,7 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
|
||||
*out = new(AllowedListSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.IngressClasses != nil {
|
||||
in, out := &in.IngressClasses, &out.IngressClasses
|
||||
*out = new(AllowedListSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.IngressHostnames != nil {
|
||||
in, out := &in.IngressHostnames, &out.IngressHostnames
|
||||
*out = new(AllowedListSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
in.IngressOptions.DeepCopyInto(&out.IngressOptions)
|
||||
if in.ContainerRegistries != nil {
|
||||
in, out := &in.ContainerRegistries, &out.ContainerRegistries
|
||||
*out = new(AllowedListSpec)
|
||||
@@ -414,21 +481,9 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
|
||||
(*out)[key] = val
|
||||
}
|
||||
}
|
||||
if in.NetworkPolicies != nil {
|
||||
in, out := &in.NetworkPolicies, &out.NetworkPolicies
|
||||
*out = new(NetworkPolicySpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.LimitRanges != nil {
|
||||
in, out := &in.LimitRanges, &out.LimitRanges
|
||||
*out = new(LimitRangesSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ResourceQuota != nil {
|
||||
in, out := &in.ResourceQuota, &out.ResourceQuota
|
||||
*out = new(ResourceQuotaSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
in.NetworkPolicies.DeepCopyInto(&out.NetworkPolicies)
|
||||
in.LimitRanges.DeepCopyInto(&out.LimitRanges)
|
||||
in.ResourceQuota.DeepCopyInto(&out.ResourceQuota)
|
||||
if in.AdditionalRoleBindings != nil {
|
||||
in, out := &in.AdditionalRoleBindings, &out.AdditionalRoleBindings
|
||||
*out = make([]AdditionalRoleBindingsSpec, len(*in))
|
||||
@@ -436,11 +491,6 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.ExternalServiceIPs != nil {
|
||||
in, out := &in.ExternalServiceIPs, &out.ExternalServiceIPs
|
||||
*out = new(ExternalServiceIPsSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ImagePullPolicies != nil {
|
||||
in, out := &in.ImagePullPolicies, &out.ImagePullPolicies
|
||||
*out = make([]ImagePullPolicySpec, len(*in))
|
||||
|
||||
@@ -21,8 +21,8 @@ sources:

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.0.19
version: 0.1.8

# This is the version number of the application being deployed.
# This version number should be incremented each time you make changes to the application.
appVersion: 0.0.5
appVersion: 0.1.1
@@ -1,6 +1,6 @@
# Deploying the Capsule Operator

Use the Capsule Operator for easily implementing, managing, and maintaining mutitenancy and access control in Kubernetes.
Use the Capsule Operator for easily implementing, managing, and maintaining multitenancy and access control in Kubernetes.

## Requirements

@@ -26,7 +26,7 @@ The Capsule Operator Chart can be used to instantly deploy the Capsule Operator

2. Install the Chart:

    $ helm install capsule clastix/capsule -n capsule-system
    $ helm install capsule clastix/capsule -n capsule-system --create-namespace

3. Show the status:
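Chart defaults can be overridden at install time; a sketch of a values file using parameters from the table below (values are illustrative):

# my-values.yaml, applied with: helm install capsule clastix/capsule -n capsule-system --create-namespace -f my-values.yaml
manager:
  options:
    forceTenantPrefix: true
    protectedNamespaceRegex: "^(default|kube-.*)$"
replicaCount: 2
customLabels:
  team: platform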
@@ -63,10 +63,8 @@ Parameter | Description | Default
`manager.hostNetwork` | Specifies if the container should be started in `hostNetwork` mode. | `false`
`manager.options.logLevel` | Set the log verbosity of the controller with a value from 1 to 10. | `4`
`manager.options.forceTenantPrefix` | Boolean, enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash | `false`
`manager.options.capsuleUserGroup` | Override the Capsule user group | `capsule.clastix.io`
`manager.options.capsuleUserGroups` | Override the Capsule user groups | `[capsule.clastix.io]`
`manager.options.protectedNamespaceRegex` | If specified, disallows creation of namespaces matching the passed regexp | `null`
`manager.options.allowIngressHostnameCollision` | Allow the Ingress hostname collision at Ingress resource level across all the Tenants | `true`
`manager.options.allowTenantIngressHostnamesCollision` | Skip the validation check at Tenant level for colliding Ingress hostnames | `false`
`manager.image.repository` | Set the image repository of the controller. | `quay.io/clastix/capsule`
`manager.image.tag` | Overrides the image tag whose default is the chart `appVersion`. | `null`
`manager.image.pullPolicy` | Set the image pull policy. | `IfNotPresent`
@@ -78,6 +76,7 @@ Parameter | Description | Default
`manager.resources.limits/cpu` | Set the memory limits assigned to the controller. | `128Mi`
`mutatingWebhooksTimeoutSeconds` | Timeout in seconds for mutating webhooks. | `30`
`validatingWebhooksTimeoutSeconds` | Timeout in seconds for validating webhooks. | `30`
`webhooks` | Additional configuration for capsule webhooks. |
`imagePullSecrets` | Configuration for `imagePullSecrets` so that you can use a private images registry. | `[]`
`serviceAccount.create` | Specifies whether a service account should be created. | `true`
`serviceAccount.annotations` | Annotations to add to the service account. | `{}`
@@ -89,20 +88,26 @@ Parameter | Description | Default
`replicaCount` | Set the replica count for Capsule pod. | `1`
`affinity` | Set affinity rules for the Capsule pod. | `{}`
`podSecurityPolicy.enabled` | Specify if a Pod Security Policy must be created. | `false`
`serviceMonitor.enabled` | Specify if a Service Monitor must be created. | `false`
`serviceMonitor.serviceAccount.name` | Specify Service Account name for metrics scrape. | `capsule`
`serviceMonitor.serviceAccount.namespace` | Specify Service Account namespace for metrics scrape. | `capsule-system`
`serviceMonitor.enabled` | Specifies if a service monitor must be created. | `false`
`serviceMonitor.labels` | Additional labels which will be added to service monitor. | `{}`
`serviceMonitor.annotations` | Additional annotations which will be added to service monitor. | `{}`
`serviceMonitor.matchLabels` | Additional matchLabels which will be added to service monitor. | `{}`
`serviceMonitor.serviceAccount.name` | Specifies service account name for metrics scrape. | `capsule`
`serviceMonitor.serviceAccount.namespace` | Specifies service account namespace for metrics scrape. | `capsule-system`
`customLabels` | Additional labels which will be added to all resources created by Capsule helm chart. | `{}`
`customAnnotations` | Additional annotations which will be added to all resources created by Capsule helm chart. | `{}`
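The new `webhooks` value is consumed by the webhook templates later in this change (each entry supplies a failurePolicy and, for the tenant-scoped webhooks, a namespaceSelector); the shape below is inferred from those templates rather than documented here:

webhooks:
  namespaceOwnerReference:
    failurePolicy: Fail
  cordoning:
    failurePolicy: Fail
    namespaceSelector:
      matchExpressions:
      - key: capsule.clastix.io/tenant
        operator: Exists
  # ingresses, namespaces, networkpolicies, pods,
  # persistentvolumeclaims and services follow the same pattern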
## Created resources

This Helm Chart cretes the following Kubernetes resources in the release namespace:
This Helm Chart creates the following Kubernetes resources in the release namespace:

* Capsule Namespace
* Capsule Operator Deployment
* Capsule Service
* CA Secret
* Certfificate Secret
* Certificate Secret
* Tenant Custom Resource Definition
* CapsuleConfiguration Custom Resource Definition
* MutatingWebHookConfiguration
* ValidatingWebHookConfiguration
* RBAC Cluster Roles
@@ -122,4 +127,4 @@ Capsule, as many other add-ons, defines its own set of Custom Resource Definitio

## More

See Capsule [use cases](https://github.com/clastix/capsule/blob/master/use_cases.md) for more information about how to use Capsule.
See Capsule [tutorial](https://github.com/clastix/capsule/blob/master/docs/content/general/tutorial.md) for more information about how to use Capsule.
@@ -30,14 +30,8 @@ spec:
    spec:
      description: CapsuleConfigurationSpec defines the Capsule configuration
      properties:
        allowIngressHostnameCollision:
          default: true
          description: Allow the collision of Ingress resource hostnames across all the Tenants.
          type: boolean
        allowTenantIngressHostnamesCollision:
          description: "When defining the exact match for allowed Ingress hostnames at Tenant level, a collision is not allowed. Toggling this, Capsule will not check if a hostname collision is in place, allowing the creation of two or more Tenant resources although sharing the same allowed hostname(s). \n The JSON path of the resource is: /spec/ingressHostnames/allowed"
          type: boolean
        forceTenantPrefix:
          default: false
          description: Enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash. This is useful to avoid Namespace name collision in a public CaaS environment.
          type: boolean
        protectedNamespaceRegex:
@@ -7,7 +7,17 @@ metadata:
  name: tenants.capsule.clastix.io
spec:
  conversion:
    strategy: None
    strategy: Webhook
    webhook:
      clientConfig:
        service:
          name: capsule-webhook-service
          namespace: capsule-system
          path: /convert
          port: 443
      conversionReviewVersions:
      - v1alpha1
      - v1beta1
  group: capsule.clastix.io
  names:
    kind: Tenant
@@ -222,11 +232,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -408,11 +422,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -453,9 +471,9 @@ spec:
|
||||
type: object
|
||||
type: object
|
||||
policyTypes:
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are "Ingress", "Egress", or "Ingress,Egress". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
items:
|
||||
description: Policy Type string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
description: PolicyType string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
@@ -572,7 +590,7 @@ spec:
|
||||
name: State
|
||||
type: string
|
||||
- description: The max amount of Namespaces can be created
|
||||
jsonPath: .spec.namespaceQuota
|
||||
jsonPath: .spec.namespaceOptions.quota
|
||||
name: Namespace quota
|
||||
type: integer
|
||||
- description: The total amount of Namespaces in use
|
||||
@@ -646,17 +664,6 @@ spec:
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
externalServiceIPs:
|
||||
description: Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means all the IPs are allowed. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- allowed
|
||||
type: object
|
||||
imagePullPolicies:
|
||||
description: Specify the allowed values for the imagePullPolicies option in Pod resources. Capsule assures that all Pod resources created in the Tenant can use only one of the allowed policy. Optional.
|
||||
items:
|
||||
@@ -666,24 +673,37 @@ spec:
|
||||
- IfNotPresent
|
||||
type: string
|
||||
type: array
|
||||
ingressClasses:
|
||||
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
|
||||
ingressOptions:
|
||||
description: Specifies options for the Ingress resources, such as allowed hostnames and IngressClass. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
ingressHostnames:
|
||||
description: Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
allowedClasses:
|
||||
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
allowedHostnames:
|
||||
description: Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
hostnameCollisionScope:
|
||||
default: Disabled
|
||||
description: "Defines the scope of hostname collision check performed when Tenant Owners create Ingress with allowed hostnames. \n - Cluster: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces managed by Capsule. \n - Tenant: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces of the Tenant. \n - Namespace: disallow the creation of an Ingress if the pair hostname and path is already used in the Ingress Namespace. \n Optional."
|
||||
enum:
|
||||
- Cluster
|
||||
- Tenant
|
||||
- Namespace
|
||||
- Disabled
|
||||
type: string
|
||||
type: object
|
||||
limitRanges:
|
||||
@@ -755,22 +775,26 @@ spec:
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
namespaceQuota:
|
||||
description: Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
|
||||
format: int32
|
||||
minimum: 1
|
||||
type: integer
|
||||
namespacesMetadata:
|
||||
description: Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
|
||||
namespaceOptions:
|
||||
description: Specifies options for the Namespaces, such as additional metadata or maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
|
||||
properties:
|
||||
additionalAnnotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
additionalLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
additionalMetadata:
|
||||
description: Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
|
||||
properties:
|
||||
annotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
labels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
quota:
|
||||
description: Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
|
||||
format: int32
|
||||
minimum: 1
|
||||
type: integer
|
||||
type: object
|
||||
networkPolicies:
|
||||
description: Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
|
||||
@@ -789,11 +813,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -975,11 +1003,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -1020,9 +1052,9 @@ spec:
|
||||
type: object
|
||||
type: object
|
||||
policyTypes:
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are "Ingress", "Egress", or "Ingress,Egress". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
items:
|
||||
description: Policy Type string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
description: PolicyType string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
@@ -1079,7 +1111,7 @@ spec:
|
||||
type: object
|
||||
type: array
|
||||
priorityClasses:
|
||||
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
|
||||
description: Specifies the allowed priorityClasses assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed PriorityClasses. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
@@ -1137,6 +1169,13 @@ spec:
|
||||
type: array
|
||||
type: object
|
||||
type: array
|
||||
scope:
|
||||
default: Tenant
|
||||
description: Define if the Resource Budget should compute resource across all Namespaces in the Tenant or individually per cluster. Default is Tenant
|
||||
enum:
|
||||
- Tenant
|
||||
- Namespace
|
||||
type: string
|
||||
type: object
|
||||
serviceOptions:
|
||||
description: Specifies options for the Service, such as additional metadata or block of certain type of Services. Optional.
|
||||
@@ -1144,11 +1183,11 @@ spec:
|
||||
additionalMetadata:
|
||||
description: Specifies additional labels and annotations the Capsule operator places on any Service resource in the Tenant. Optional.
|
||||
properties:
|
||||
additionalAnnotations:
|
||||
annotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
additionalLabels:
|
||||
labels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
@@ -1160,11 +1199,26 @@ spec:
|
||||
default: true
|
||||
description: Specifies if ExternalName service type resources are allowed for the Tenant. Default is true. Optional.
|
||||
type: boolean
|
||||
loadBalancer:
|
||||
default: true
|
||||
description: Specifies if LoadBalancer service type resources are allowed for the Tenant. Default is true. Optional.
|
||||
type: boolean
|
||||
nodePort:
|
||||
default: true
|
||||
description: Specifies if NodePort service type resources are allowed for the Tenant. Default is true. Optional.
|
||||
type: boolean
|
||||
type: object
|
||||
externalIPs:
|
||||
description: Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means no IPs are allowed. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- allowed
|
||||
type: object
|
||||
type: object
|
||||
storageClasses:
|
||||
description: Specifies the allowed StorageClasses assigned to the Tenant. Capsule assures that all PersistentVolumeClaim resources created in the Tenant can use only one of the allowed StorageClasses. Optional.
|
||||
@@ -1191,11 +1245,11 @@ spec:
|
||||
description: How many namespaces are assigned to the Tenant.
|
||||
type: integer
|
||||
state:
|
||||
default: active
|
||||
description: The operational state of the Tenant. Possible values are "active", "cordoned".
|
||||
default: Active
|
||||
description: The operational state of the Tenant. Possible values are "Active", "Cordoned".
|
||||
enum:
|
||||
- cordoned
|
||||
- active
|
||||
- Cordoned
|
||||
- Active
|
||||
type: string
|
||||
required:
|
||||
- size
|
||||
|
||||
@@ -40,6 +40,9 @@ helm.sh/chart: {{ include "capsule.chart" . }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- if .Values.customLabels }}
|
||||
{{ toYaml .Values.customLabels }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
@@ -50,6 +53,19 @@ app.kubernetes.io/name: {{ include "capsule.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
ServiceAccount annotations
|
||||
*/}}
|
||||
{{- define "capsule.serviceAccountAnnotations" -}}
|
||||
{{- if .Values.serviceAccount.annotations }}
|
||||
{{- toYaml .Values.serviceAccount.annotations }}
|
||||
{{- end }}
|
||||
{{- if .Values.customAnnotations }}
|
||||
{{ toYaml .Values.customAnnotations }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
@@ -75,11 +91,26 @@ Create the proxy fully-qualified Docker image to use
|
||||
{{- printf "%s:%s" .Values.proxy.image.repository .Values.proxy.image.tag -}}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Determine the Kubernetes version to use for jobsFullyQualifiedDockerImage tag
|
||||
*/}}
|
||||
{{- define "capsule.jobsTagKubeVersion" -}}
|
||||
{{- if contains "-eks-" .Capabilities.KubeVersion.GitVersion }}
|
||||
{{- print "v" .Capabilities.KubeVersion.Major "." (.Capabilities.KubeVersion.Minor | replace "+" "") -}}
|
||||
{{- else }}
|
||||
{{- print "v" .Capabilities.KubeVersion.Major "." .Capabilities.KubeVersion.Minor -}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the jobs fully-qualified Docker image to use
|
||||
*/}}
|
||||
{{- define "capsule.jobsFullyQualifiedDockerImage" -}}
|
||||
{{- if .Values.jobs.image.tag }}
|
||||
{{- printf "%s:%s" .Values.jobs.image.repository .Values.jobs.image.tag -}}
|
||||
{{- else }}
|
||||
{{- printf "%s:%s" .Values.jobs.image.repository (include "capsule.jobsTagKubeVersion" .) -}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
|
||||
@@ -3,5 +3,8 @@ kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "capsule.secretCaName" . }}
|
||||
data:
|
||||
|
||||
@@ -3,5 +3,8 @@ kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "capsule.secretTlsName" . }}
|
||||
data:
|
||||
|
||||
@@ -2,6 +2,16 @@ apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: CapsuleConfiguration
|
||||
metadata:
|
||||
name: default
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
capsule.clastix.io/ca-secret-name: {{ include "capsule.secretCaName" . }}
|
||||
capsule.clastix.io/mutating-webhook-configuration-name: {{ include "capsule.fullname" . }}-mutating-webhook-configuration
|
||||
capsule.clastix.io/tls-secret-name: {{ include "capsule.secretTlsName" . }}
|
||||
capsule.clastix.io/validating-webhook-configuration-name: {{ include "capsule.fullname" . }}-validating-webhook-configuration
|
||||
{{- with .Values.customAnnotations }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
forceTenantPrefix: {{ .Values.manager.options.forceTenantPrefix }}
|
||||
userGroups:
|
||||
@@ -9,5 +19,3 @@ spec:
|
||||
- {{ . }}
|
||||
{{- end}}
|
||||
protectedNamespaceRegex: {{ .Values.manager.options.protectedNamespaceRegex | quote }}
|
||||
allowTenantIngressHostnamesCollision: {{ .Values.manager.options.allowTenantIngressHostnamesCollision }}
|
||||
allowIngressHostnameCollision: {{ .Values.manager.options.allowIngressHostnameCollision }}
|
||||
|
||||
@@ -4,6 +4,10 @@ metadata:
|
||||
name: {{ include "capsule.deploymentName" . }}
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
@@ -11,12 +15,12 @@ spec:
|
||||
{{- include "capsule.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
{{- with .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "capsule.selectorLabels" . | nindent 8 }}
|
||||
{{- include "capsule.labels" . | nindent 8 }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
|
||||
@@ -4,9 +4,13 @@ kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- if .Values.serviceMonitor.labels }}
|
||||
{{- if .Values.serviceMonitor.labels }}
|
||||
{{- toYaml .Values.serviceMonitor.labels | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "capsule.fullname" . }}-metrics-role
|
||||
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
|
||||
rules:
|
||||
|
||||
@@ -4,6 +4,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}-controller-manager-metrics-service
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
ports:
|
||||
- port: 8080
|
||||
|
||||
@@ -4,6 +4,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}-mutating-webhook-configuration
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
@@ -15,7 +19,7 @@ webhooks:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /namespace-owner-reference
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
failurePolicy: {{ .Values.webhooks.namespaceOwnerReference.failurePolicy }}
|
||||
matchPolicy: Equivalent
|
||||
name: owner.namespace.capsule.clastix.io
|
||||
namespaceSelector: {}
|
||||
@@ -28,6 +32,7 @@ webhooks:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- namespaces
|
||||
scope: '*'
|
||||
|
||||
@@ -5,6 +5,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{{- $cmd := "while [ -z $$(kubectl -n $NAMESPACE get secret capsule-tls -o jsonpath='{.data.tls\\\\.crt}') ];" -}}
|
||||
{{- $cmd := printf "while [ -z $$(kubectl -n $NAMESPACE get secret %s -o jsonpath='{.data.tls\\\\.crt}') ];" (include "capsule.secretCaName" .) -}}
|
||||
{{- $cmd = printf "%s do echo 'waiting Capsule to be up and running...' && sleep 5;" $cmd -}}
|
||||
{{- $cmd = printf "%s done" $cmd -}}
|
||||
apiVersion: batch/v1
|
||||
@@ -6,16 +6,16 @@ kind: Job
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}-waiting-certs"
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name | quote }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion }}
|
||||
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
# This is what defines this resource as a hook. Without this line, the
|
||||
# job is considered part of the release.
|
||||
"helm.sh/hook": post-install
|
||||
"helm.sh/hook-weight": "-5"
|
||||
"helm.sh/hook-delete-policy": hook-succeeded
|
||||
{{- with .Values.customAnnotations }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
@@ -25,6 +25,14 @@ spec:
|
||||
app.kubernetes.io/instance: {{ .Release.Name | quote }}
|
||||
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
restartPolicy: Never
|
||||
containers:
|
||||
- name: post-install-job
|
||||
|
||||
@@ -7,16 +7,16 @@ kind: Job
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}-rbac-cleaner"
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name | quote }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion }}
|
||||
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
# This is what defines this resource as a hook. Without this line, the
|
||||
# job is considered part of the release.
|
||||
"helm.sh/hook": pre-delete
|
||||
"helm.sh/hook-weight": "-5"
|
||||
"helm.sh/hook-delete-policy": hook-succeeded
|
||||
{{- with .Values.customAnnotations }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
@@ -26,6 +26,14 @@ spec:
|
||||
app.kubernetes.io/instance: {{ .Release.Name | quote }}
|
||||
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
restartPolicy: Never
|
||||
containers:
|
||||
- name: pre-delete-job
|
||||
|
||||
@@ -4,6 +4,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}-proxy-role
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- authentication.k8s.io
|
||||
@@ -24,6 +28,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}-metrics-reader
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- nonResourceURLs:
|
||||
- /metrics
|
||||
@@ -36,6 +44,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}-proxy-rolebinding
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
@@ -51,6 +63,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}-manager-rolebinding
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
|
||||
@@ -5,8 +5,8 @@ metadata:
|
||||
name: {{ include "capsule.serviceAccountName" . }}
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
{{- if or (.Values.serviceAccount.annotations) (.Values.customAnnotations) }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- include "capsule.serviceAccountAnnotations" . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -6,9 +6,13 @@ metadata:
|
||||
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- if .Values.serviceMonitor.labels }}
|
||||
{{- toYaml .Values.serviceMonitor.labels | nindent 4 }}
|
||||
{{- with .Values.serviceMonitor.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.serviceMonitor.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
endpoints:
|
||||
- interval: 15s
|
||||
@@ -16,7 +20,11 @@ spec:
|
||||
path: /metrics
|
||||
jobLabel: app.kubernetes.io/name
|
||||
selector:
|
||||
matchLabels: {{ include "capsule.labels" . | nindent 6 }}
|
||||
matchLabels:
|
||||
{{- include "capsule.labels" . | nindent 6 }}
|
||||
{{- with .Values.serviceMonitor.matchLabels }}
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
|
||||
@@ -4,6 +4,10 @@ metadata:
name: {{ include "capsule.fullname" . }}-validating-webhook-configuration
labels:
{{- include "capsule.labels" . | nindent 4 }}
{{- with .Values.customAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
webhooks:
- admissionReviewVersions:
- v1
@@ -15,13 +19,11 @@ webhooks:
namespace: {{ .Release.Namespace }}
path: /cordoning
port: 443
failurePolicy: Fail
failurePolicy: {{ .Values.webhooks.cordoning.failurePolicy }}
matchPolicy: Equivalent
name: cordoning.tenant.capsule.clastix.io
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
{{- toYaml .Values.webhooks.cordoning.namespaceSelector | nindent 4}}
objectSelector: {}
rules:
- apiGroups:
@@ -47,13 +49,11 @@ webhooks:
namespace: {{ .Release.Namespace }}
path: /ingresses
port: 443
failurePolicy: Fail
failurePolicy: {{ .Values.webhooks.ingresses.failurePolicy }}
matchPolicy: Equivalent
name: ingress.capsule.clastix.io
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
{{- toYaml .Values.webhooks.ingresses.namespaceSelector | nindent 4}}
objectSelector: {}
rules:
- apiGroups:
@@ -80,7 +80,7 @@ webhooks:
namespace: {{ .Release.Namespace }}
path: /namespaces
port: 443
failurePolicy: Fail
failurePolicy: {{ .Values.webhooks.namespaces.failurePolicy }}
matchPolicy: Equivalent
name: namespaces.capsule.clastix.io
namespaceSelector: {}
@@ -109,13 +109,11 @@ webhooks:
namespace: {{ .Release.Namespace }}
path: /networkpolicies
port: 443
failurePolicy: Fail
failurePolicy: {{ .Values.webhooks.networkpolicies.failurePolicy }}
matchPolicy: Equivalent
name: networkpolicies.capsule.clastix.io
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
{{- toYaml .Values.webhooks.networkpolicies.namespaceSelector | nindent 4}}
objectSelector: {}
rules:
- apiGroups:
@@ -140,13 +138,11 @@ webhooks:
namespace: {{ .Release.Namespace }}
path: /pods
port: 443
failurePolicy: Fail
failurePolicy: {{ .Values.webhooks.pods.failurePolicy }}
matchPolicy: Exact
name: pods.capsule.clastix.io
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
{{- toYaml .Values.webhooks.pods.namespaceSelector | nindent 4}}
objectSelector: {}
rules:
- apiGroups:
@@ -167,14 +163,12 @@ webhooks:
caBundle: Cg==
service:
name: {{ include "capsule.fullname" . }}-webhook-service
namespace: capsule-system
namespace: {{ .Release.Namespace }}
path: /persistentvolumeclaims
failurePolicy: Fail
failurePolicy: {{ .Values.webhooks.persistentvolumeclaims.failurePolicy }}
name: pvc.capsule.clastix.io
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
{{- toYaml .Values.webhooks.persistentvolumeclaims.namespaceSelector | nindent 4}}
objectSelector: {}
rules:
- apiGroups:
@@ -198,13 +192,11 @@ webhooks:
namespace: {{ .Release.Namespace }}
path: /services
port: 443
failurePolicy: Fail
failurePolicy: {{ .Values.webhooks.services.failurePolicy }}
matchPolicy: Exact
name: services.capsule.clastix.io
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
{{- toYaml .Values.webhooks.services.namespaceSelector | nindent 4}}
objectSelector: {}
rules:
- apiGroups:
@@ -229,7 +221,7 @@ webhooks:
namespace: {{ .Release.Namespace }}
path: /tenants
port: 443
failurePolicy: Fail
failurePolicy: {{ .Values.webhooks.tenants.failurePolicy }}
matchPolicy: Exact
name: tenants.capsule.clastix.io
namespaceSelector: {}
@@ -248,3 +240,29 @@ webhooks:
scope: '*'
sideEffects: None
timeoutSeconds: {{ .Values.validatingWebhooksTimeoutSeconds }}
- admissionReviewVersions:
- v1
- v1beta1
clientConfig:
caBundle: Cg==
service:
name: {{ include "capsule.fullname" . }}-webhook-service
namespace: {{ .Release.Namespace }}
path: /nodes
port: 443
failurePolicy: {{ .Values.webhooks.nodes.failurePolicy }}
name: nodes.capsule.clastix.io
matchPolicy: Exact
namespaceSelector: {}
objectSelector: {}
rules:
- apiGroups:
- ""
apiVersions:
- v1
operations:
- UPDATE
resources:
- nodes
sideEffects: None
timeoutSeconds: {{ .Values.validatingWebhooksTimeoutSeconds }}

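For illustration, with the chart's default values the `toYaml .Values.webhooks.<name>.namespaceSelector | nindent 4` calls above render the same selector that was previously hard-coded in the template. A sketch of the rendered fragment, assuming the defaults shown later in values.yaml:

```yaml
namespaceSelector:
  matchExpressions:
    - key: capsule.clastix.io/tenant
      operator: Exists
```
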
@@ -4,6 +4,10 @@ metadata:
name: {{ include "capsule.fullname" . }}-webhook-service
labels:
{{- include "capsule.labels" . | nindent 4 }}
{{- with .Values.customAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
ports:
- port: 443

@@ -21,8 +21,6 @@ manager:
forceTenantPrefix: false
capsuleUserGroups: ["capsule.clastix.io"]
protectedNamespaceRegex: ""
allowIngressHostnameCollision: true
allowTenantIngressHostnamesCollision: false
livenessProbe:
httpGet:
path: /healthz
@@ -43,9 +41,7 @@ jobs:
image:
repository: quay.io/clastix/kubectl
pullPolicy: IfNotPresent
tag: "v1.20.7"
mutatingWebhooksTimeoutSeconds: 30
validatingWebhooksTimeoutSeconds: 30
tag: ""
imagePullSecrets: []
serviceAccount:
create: true
@@ -68,7 +64,7 @@ podSecurityPolicy:
serviceMonitor:
enabled: false
# Install the ServiceMonitor into a different Namespace, as the monitoring stack one (default: the release one)
namespace:
namespace: ''
# Assign additional labels according to Prometheus' serviceMonitorSelector matching labels
labels: {}
annotations: {}
@@ -76,3 +72,58 @@ serviceMonitor:
serviceAccount:
name: capsule
namespace: capsule-system

# Additional labels
customLabels: {}

# Additional annotations
customAnnotations: {}

# Webhooks configurations
webhooks:
namespaceOwnerReference:
failurePolicy: Fail
cordoning:
failurePolicy: Fail
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
ingresses:
failurePolicy: Fail
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
namespaces:
failurePolicy: Fail
networkpolicies:
failurePolicy: Fail
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
pods:
failurePolicy: Fail
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
persistentvolumeclaims:
failurePolicy: Fail
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
tenants:
failurePolicy: Fail
services:
failurePolicy: Fail
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
nodes:
failurePolicy: Fail
mutatingWebhooksTimeoutSeconds: 30
validatingWebhooksTimeoutSeconds: 30

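For illustration, the new per-webhook `failurePolicy` values can be overridden at install or upgrade time through a custom values file. A sketch, assuming you intentionally want a softer policy for a subset of webhooks (the `Ignore` values are examples, not recommendations):

```yaml
# example-values.yaml (illustrative only)
webhooks:
  pods:
    failurePolicy: Ignore
  services:
    failurePolicy: Ignore
```
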
@@ -30,15 +30,8 @@ spec:
metadata:
type: object
spec:
description: CapsuleConfigurationSpec defines the Capsule configuration nolint:maligned
description: CapsuleConfigurationSpec defines the Capsule configuration
properties:
allowIngressHostnameCollision:
default: true
description: Allow the collision of Ingress resource hostnames across all the Tenants.
type: boolean
allowTenantIngressHostnamesCollision:
description: "When defining the exact match for allowed Ingress hostnames at Tenant level, a collision is not allowed. Toggling this, Capsule will not check if a hostname collision is in place, allowing the creation of two or more Tenant resources although sharing the same allowed hostname(s). \n The JSON path of the resource is: /spec/ingressHostnames/allowed"
type: boolean
forceTenantPrefix:
default: false
description: Enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash. This is useful to avoid Namespace name collision in a public CaaS environment.

@@ -222,11 +222,15 @@ spec:
items:
description: NetworkPolicyPort describes a port to allow traffic on
properties:
endPort:
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
format: int32
type: integer
port:
anyOf:
- type: integer
- type: string
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
x-kubernetes-int-or-string: true
protocol:
default: TCP
@@ -408,11 +412,15 @@ spec:
items:
description: NetworkPolicyPort describes a port to allow traffic on
properties:
endPort:
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
format: int32
type: integer
port:
anyOf:
- type: integer
- type: string
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
x-kubernetes-int-or-string: true
protocol:
default: TCP
@@ -453,9 +461,9 @@ spec:
type: object
type: object
policyTypes:
description: List of rule types that the NetworkPolicy relates to. Valid options are "Ingress", "Egress", or "Ingress,Egress". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
description: List of rule types that the NetworkPolicy relates to. Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
items:
description: Policy Type string describes the NetworkPolicy type This type is beta-level in 1.8
description: PolicyType string describes the NetworkPolicy type This type is beta-level in 1.8
type: string
type: array
required:
@@ -572,7 +580,7 @@ spec:
name: State
type: string
- description: The max amount of Namespaces can be created
jsonPath: .spec.namespaceQuota
jsonPath: .spec.namespaceOptions.quota
name: Namespace quota
type: integer
- description: The total amount of Namespaces in use
@@ -646,17 +654,6 @@ spec:
allowedRegex:
type: string
type: object
externalServiceIPs:
description: Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means all the IPs are allowed. Optional.
properties:
allowed:
items:
pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
type: string
type: array
required:
- allowed
type: object
imagePullPolicies:
description: Specify the allowed values for the imagePullPolicies option in Pod resources. Capsule assures that all Pod resources created in the Tenant can use only one of the allowed policy. Optional.
items:
@@ -666,28 +663,41 @@ spec:
- IfNotPresent
type: string
type: array
ingressClasses:
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
ingressOptions:
description: Specifies options for the Ingress resources, such as allowed hostnames and IngressClass. Optional.
properties:
allowed:
items:
type: string
type: array
allowedRegex:
type: string
type: object
ingressHostnames:
description: Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
properties:
allowed:
items:
type: string
type: array
allowedRegex:
allowedClasses:
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
properties:
allowed:
items:
type: string
type: array
allowedRegex:
type: string
type: object
allowedHostnames:
description: Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
properties:
allowed:
items:
type: string
type: array
allowedRegex:
type: string
type: object
hostnameCollisionScope:
default: Disabled
description: "Defines the scope of hostname collision check performed when Tenant Owners create Ingress with allowed hostnames. \n - Cluster: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces managed by Capsule. \n - Tenant: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces of the Tenant. \n - Namespace: disallow the creation of an Ingress if the pair hostname and path is already used in the Ingress Namespace. \n Optional."
enum:
- Cluster
- Tenant
- Namespace
- Disabled
type: string
type: object
limitRanges:
description: Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
description: Specifies the resource min/max usage restrictions to the Tenant. The assigned values are inherited by any namespace created in the Tenant. Optional.
properties:
items:
items:
@@ -755,22 +765,26 @@ spec:
type: object
type: array
type: object
namespaceQuota:
description: Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
format: int32
minimum: 1
type: integer
namespacesMetadata:
description: Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
namespaceOptions:
description: Specifies options for the Namespaces, such as additional metadata or maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
properties:
additionalAnnotations:
additionalProperties:
type: string
type: object
additionalLabels:
additionalProperties:
type: string
additionalMetadata:
description: Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
properties:
annotations:
additionalProperties:
type: string
type: object
labels:
additionalProperties:
type: string
type: object
type: object
quota:
description: Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
format: int32
minimum: 1
type: integer
type: object
networkPolicies:
description: Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
@@ -789,11 +803,15 @@ spec:
items:
description: NetworkPolicyPort describes a port to allow traffic on
properties:
endPort:
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
format: int32
type: integer
port:
anyOf:
- type: integer
- type: string
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
x-kubernetes-int-or-string: true
protocol:
default: TCP
@@ -975,11 +993,15 @@ spec:
items:
description: NetworkPolicyPort describes a port to allow traffic on
properties:
endPort:
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
format: int32
type: integer
port:
anyOf:
- type: integer
- type: string
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
x-kubernetes-int-or-string: true
protocol:
default: TCP
@@ -1020,9 +1042,9 @@ spec:
type: object
type: object
policyTypes:
description: List of rule types that the NetworkPolicy relates to. Valid options are "Ingress", "Egress", or "Ingress,Egress". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
description: List of rule types that the NetworkPolicy relates to. Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
items:
description: Policy Type string describes the NetworkPolicy type This type is beta-level in 1.8
description: PolicyType string describes the NetworkPolicy type This type is beta-level in 1.8
type: string
type: array
required:
@@ -1033,7 +1055,7 @@ spec:
nodeSelector:
additionalProperties:
type: string
description: Specifies the label to control the placement of pods on a given pool of worker nodes. All namesapces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
description: Specifies the label to control the placement of pods on a given pool of worker nodes. All namespaces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
type: object
owners:
description: Specifies the owners of the Tenant. Mandatory.
@@ -1079,7 +1101,7 @@ spec:
type: object
type: array
priorityClasses:
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
description: Specifies the allowed priorityClasses assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed PriorityClasses. Optional.
properties:
allowed:
items:
@@ -1137,6 +1159,13 @@ spec:
type: array
type: object
type: array
scope:
default: Tenant
description: Define if the Resource Budget should compute resource across all Namespaces in the Tenant or individually per cluster. Default is Tenant
enum:
- Tenant
- Namespace
type: string
type: object
serviceOptions:
description: Specifies options for the Service, such as additional metadata or block of certain type of Services. Optional.
@@ -1144,11 +1173,11 @@ spec:
additionalMetadata:
description: Specifies additional labels and annotations the Capsule operator places on any Service resource in the Tenant. Optional.
properties:
additionalAnnotations:
annotations:
additionalProperties:
type: string
type: object
additionalLabels:
labels:
additionalProperties:
type: string
type: object
@@ -1160,11 +1189,26 @@ spec:
default: true
description: Specifies if ExternalName service type resources are allowed for the Tenant. Default is true. Optional.
type: boolean
loadBalancer:
default: true
description: Specifies if LoadBalancer service type resources are allowed for the Tenant. Default is true. Optional.
type: boolean
nodePort:
default: true
description: Specifies if NodePort service type resources are allowed for the Tenant. Default is true. Optional.
type: boolean
type: object
externalIPs:
description: Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means no IPs are allowed. Optional.
properties:
allowed:
items:
pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
type: string
type: array
required:
- allowed
type: object
type: object
storageClasses:
description: Specifies the allowed StorageClasses assigned to the Tenant. Capsule assures that all PersistentVolumeClaim resources created in the Tenant can use only one of the allowed StorageClasses. Optional.
@@ -1191,11 +1235,11 @@ spec:
description: How many namespaces are assigned to the Tenant.
type: integer
state:
default: active
description: The operational state of the Tenant. Possible values are "active", "cordoned".
default: Active
description: The operational state of the Tenant. Possible values are "Active", "Cordoned".
enum:
- cordoned
- active
- Cordoned
- Active
type: string
required:
- size

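For illustration, the renamed Tenant fields group what used to be `ingressClasses`, `ingressHostnames`, `namespaceQuota` and `namespacesMetadata` under the new `ingressOptions` and `namespaceOptions` blocks. A sketch of the new spec layout, with placeholder values that are not taken from this diff:

```yaml
# illustrative Tenant spec fragment (new field layout)
spec:
  namespaceOptions:
    quota: 3
    additionalMetadata:
      labels:
        example.com/team: platform
  ingressOptions:
    hostnameCollisionScope: Tenant
    allowedClasses:
      allowed: ["nginx"]
    allowedHostnames:
      allowedRegex: ^.*example.com$
  serviceOptions:
    externalIPs:
      allowed: ["192.168.0.0/16"]
```
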
@@ -35,15 +35,8 @@ spec:
metadata:
type: object
spec:
description: CapsuleConfigurationSpec defines the Capsule configuration nolint:maligned
description: CapsuleConfigurationSpec defines the Capsule configuration
properties:
allowIngressHostnameCollision:
default: true
description: Allow the collision of Ingress resource hostnames across all the Tenants.
type: boolean
allowTenantIngressHostnamesCollision:
description: "When defining the exact match for allowed Ingress hostnames at Tenant level, a collision is not allowed. Toggling this, Capsule will not check if a hostname collision is in place, allowing the creation of two or more Tenant resources although sharing the same allowed hostname(s). \n The JSON path of the resource is: /spec/ingressHostnames/allowed"
type: boolean
forceTenantPrefix:
default: false
description: Enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash. This is useful to avoid Namespace name collision in a public CaaS environment.
@@ -301,11 +294,15 @@ spec:
items:
description: NetworkPolicyPort describes a port to allow traffic on
properties:
endPort:
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
format: int32
type: integer
port:
anyOf:
- type: integer
- type: string
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
x-kubernetes-int-or-string: true
protocol:
default: TCP
@@ -487,11 +484,15 @@ spec:
items:
description: NetworkPolicyPort describes a port to allow traffic on
properties:
endPort:
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
format: int32
type: integer
port:
anyOf:
- type: integer
- type: string
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
x-kubernetes-int-or-string: true
protocol:
default: TCP
@@ -532,9 +533,9 @@ spec:
type: object
type: object
policyTypes:
description: List of rule types that the NetworkPolicy relates to. Valid options are "Ingress", "Egress", or "Ingress,Egress". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
description: List of rule types that the NetworkPolicy relates to. Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
items:
description: Policy Type string describes the NetworkPolicy type This type is beta-level in 1.8
description: PolicyType string describes the NetworkPolicy type This type is beta-level in 1.8
type: string
type: array
required:
@@ -651,7 +652,7 @@ spec:
name: State
type: string
- description: The max amount of Namespaces can be created
jsonPath: .spec.namespaceQuota
jsonPath: .spec.namespaceOptions.quota
name: Namespace quota
type: integer
- description: The total amount of Namespaces in use
@@ -725,17 +726,6 @@ spec:
allowedRegex:
type: string
type: object
externalServiceIPs:
description: Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means all the IPs are allowed. Optional.
properties:
allowed:
items:
pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
type: string
type: array
required:
- allowed
type: object
imagePullPolicies:
description: Specify the allowed values for the imagePullPolicies option in Pod resources. Capsule assures that all Pod resources created in the Tenant can use only one of the allowed policy. Optional.
items:
@@ -745,28 +735,41 @@ spec:
- IfNotPresent
type: string
type: array
ingressClasses:
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
ingressOptions:
description: Specifies options for the Ingress resources, such as allowed hostnames and IngressClass. Optional.
properties:
allowed:
items:
type: string
type: array
allowedRegex:
type: string
type: object
ingressHostnames:
description: Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
properties:
allowed:
items:
type: string
type: array
allowedRegex:
allowedClasses:
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
properties:
allowed:
items:
type: string
type: array
allowedRegex:
type: string
type: object
allowedHostnames:
description: Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
properties:
allowed:
items:
type: string
type: array
allowedRegex:
type: string
type: object
hostnameCollisionScope:
default: Disabled
description: "Defines the scope of hostname collision check performed when Tenant Owners create Ingress with allowed hostnames. \n - Cluster: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces managed by Capsule. \n - Tenant: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces of the Tenant. \n - Namespace: disallow the creation of an Ingress if the pair hostname and path is already used in the Ingress Namespace. \n Optional."
enum:
- Cluster
- Tenant
- Namespace
- Disabled
type: string
type: object
limitRanges:
description: Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
description: Specifies the resource min/max usage restrictions to the Tenant. The assigned values are inherited by any namespace created in the Tenant. Optional.
properties:
items:
items:
@@ -834,22 +837,26 @@ spec:
type: object
type: array
type: object
namespaceQuota:
description: Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
format: int32
minimum: 1
type: integer
namespacesMetadata:
description: Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
namespaceOptions:
description: Specifies options for the Namespaces, such as additional metadata or maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
properties:
additionalAnnotations:
additionalProperties:
type: string
type: object
additionalLabels:
additionalProperties:
type: string
additionalMetadata:
description: Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
properties:
annotations:
additionalProperties:
type: string
type: object
labels:
additionalProperties:
type: string
type: object
type: object
quota:
description: Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
format: int32
minimum: 1
type: integer
type: object
networkPolicies:
description: Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
@@ -868,11 +875,15 @@ spec:
items:
description: NetworkPolicyPort describes a port to allow traffic on
properties:
endPort:
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
format: int32
type: integer
port:
anyOf:
- type: integer
- type: string
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
x-kubernetes-int-or-string: true
protocol:
default: TCP
@@ -1054,11 +1065,15 @@ spec:
items:
description: NetworkPolicyPort describes a port to allow traffic on
properties:
endPort:
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
format: int32
type: integer
port:
anyOf:
- type: integer
- type: string
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
x-kubernetes-int-or-string: true
protocol:
default: TCP
@@ -1099,9 +1114,9 @@ spec:
type: object
type: object
policyTypes:
description: List of rule types that the NetworkPolicy relates to. Valid options are "Ingress", "Egress", or "Ingress,Egress". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
description: List of rule types that the NetworkPolicy relates to. Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
items:
description: Policy Type string describes the NetworkPolicy type This type is beta-level in 1.8
description: PolicyType string describes the NetworkPolicy type This type is beta-level in 1.8
type: string
type: array
required:
@@ -1112,7 +1127,7 @@ spec:
nodeSelector:
additionalProperties:
type: string
description: Specifies the label to control the placement of pods on a given pool of worker nodes. All namesapces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
description: Specifies the label to control the placement of pods on a given pool of worker nodes. All namespaces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
type: object
owners:
description: Specifies the owners of the Tenant. Mandatory.
@@ -1158,7 +1173,7 @@ spec:
type: object
type: array
priorityClasses:
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
description: Specifies the allowed priorityClasses assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed PriorityClasses. Optional.
properties:
allowed:
items:
@@ -1216,6 +1231,13 @@ spec:
type: array
type: object
type: array
scope:
default: Tenant
description: Define if the Resource Budget should compute resource across all Namespaces in the Tenant or individually per cluster. Default is Tenant
enum:
- Tenant
- Namespace
type: string
type: object
serviceOptions:
description: Specifies options for the Service, such as additional metadata or block of certain type of Services. Optional.
@@ -1223,11 +1245,11 @@ spec:
additionalMetadata:
description: Specifies additional labels and annotations the Capsule operator places on any Service resource in the Tenant. Optional.
properties:
additionalAnnotations:
annotations:
additionalProperties:
type: string
type: object
additionalLabels:
labels:
additionalProperties:
type: string
type: object
@@ -1239,11 +1261,26 @@ spec:
default: true
description: Specifies if ExternalName service type resources are allowed for the Tenant. Default is true. Optional.
type: boolean
loadBalancer:
default: true
description: Specifies if LoadBalancer service type resources are allowed for the Tenant. Default is true. Optional.
type: boolean
nodePort:
default: true
description: Specifies if NodePort service type resources are allowed for the Tenant. Default is true. Optional.
type: boolean
type: object
externalIPs:
description: Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means no IPs are allowed. Optional.
properties:
allowed:
items:
pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
type: string
type: array
required:
- allowed
type: object
type: object
storageClasses:
description: Specifies the allowed StorageClasses assigned to the Tenant. Capsule assures that all PersistentVolumeClaim resources created in the Tenant can use only one of the allowed StorageClasses. Optional.
@@ -1270,11 +1307,11 @@ spec:
description: How many namespaces are assigned to the Tenant.
type: integer
state:
default: active
description: The operational state of the Tenant. Possible values are "active", "cordoned".
default: Active
description: The operational state of the Tenant. Possible values are "Active", "Cordoned".
enum:
- cordoned
- active
- Cordoned
- Active
type: string
required:
- size
@@ -1374,7 +1411,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: quay.io/clastix/capsule:v0.1.0-rc4
image: quay.io/clastix/capsule:v0.1.1
imagePullPolicy: IfNotPresent
name: manager
ports:
@@ -1408,8 +1445,6 @@ metadata:
name: capsule-default
namespace: capsule-system
spec:
allowIngressHostnameCollision: false
allowTenantIngressHostnamesCollision: false
forceTenantPrefix: false
protectedNamespaceRegex: ""
userGroups:
@@ -1437,6 +1472,7 @@ webhooks:
- v1
operations:
- CREATE
- UPDATE
resources:
- namespaces
sideEffects: None
@@ -1546,6 +1582,29 @@ webhooks:
- networkpolicies
scope: Namespaced
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: capsule-webhook-service
namespace: capsule-system
path: /nodes
failurePolicy: Fail
name: nodes.capsule.clastix.io
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
rules:
- apiGroups:
- ""
apiVersions:
- v1
operations:
- UPDATE
resources:
- nodes
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:

@@ -6,5 +6,3 @@ spec:
userGroups: ["capsule.clastix.io"]
forceTenantPrefix: false
protectedNamespaceRegex: ""
allowTenantIngressHostnamesCollision: false
allowIngressHostnameCollision: false

@@ -7,4 +7,4 @@ kind: Kustomization
images:
- name: controller
newName: quay.io/clastix/capsule
newTag: v0.1.0-rc4
newTag: v0.1.1

@@ -7,5 +7,3 @@ spec:
userGroups: ["capsule.clastix.io"]
forceTenantPrefix: false
protectedNamespaceRegex: ""
allowTenantIngressHostnamesCollision: false
allowIngressHostnameCollision: false

@@ -18,27 +18,29 @@ spec:
allowedRegex: ^\w+.gcr.io$
serviceOptions:
additionalMetadata:
additionalAnnotations:
annotations:
capsule.clastix.io/bgp: "true"
additionalLabels:
labels:
capsule.clastix.io/pool: gas
allowedServices:
nodePort: false
externalName: false
externalServiceIPs:
allowed:
- 10.20.0.0/16
- "10.96.42.42"
externalIPs:
allowed:
- 10.20.0.0/16
- "10.96.42.42"
imagePullPolicies:
- Always
ingressClasses:
allowed:
- default
allowedRegex: ^\w+-lb$
ingressHostnames:
allowed:
- gas.acmecorp.com
allowedRegex: ^.*acmecorp.com$
ingressOptions:
hostnameCollisionScope: Cluster
allowedClasses:
allowed:
- default
allowedRegex: ^\w+-lb$
allowedHostnames:
allowed:
- gas.acmecorp.com
allowedRegex: ^.*acmecorp.com$
limitRanges:
items:
-
@@ -71,12 +73,13 @@ spec:
min:
storage: 1Gi
type: PersistentVolumeClaim
namespaceQuota: 3
namespacesMetadata:
additionalAnnotations:
capsule.clastix.io/backup: "false"
additionalLabels:
capsule.clastix.io/tenant: gas
namespaceOptions:
quota: 3
additionalMetadata:
annotations:
capsule.clastix.io/backup: "false"
labels:
capsule.clastix.io/tenant: gas
networkPolicies:
items:
-

@@ -22,6 +22,7 @@ webhooks:
- v1
operations:
- CREATE
- UPDATE
resources:
- namespaces
sideEffects: None
@@ -117,6 +118,25 @@ webhooks:
resources:
- networkpolicies
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:
service:
name: webhook-service
namespace: system
path: /nodes
failurePolicy: Fail
name: nodes.capsule.clastix.io
rules:
- apiGroups:
- ""
apiVersions:
- v1
operations:
- UPDATE
resources:
- nodes
sideEffects: None
- admissionReviewVersions:
- v1
clientConfig:

@@ -34,6 +34,12 @@
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
- op: add
path: /webhooks/7/namespaceSelector
value:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
- op: add
path: /webhooks/0/rules/0/scope
value: Namespaced
@@ -43,12 +49,12 @@
- op: add
path: /webhooks/3/rules/0/scope
value: Namespaced
- op: add
path: /webhooks/4/rules/0/scope
value: Namespaced
- op: add
path: /webhooks/5/rules/0/scope
value: Namespaced
- op: add
path: /webhooks/6/rules/0/scope
value: Namespaced
- op: add
path: /webhooks/7/rules/0/scope
value: Namespaced

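For illustration, a JSON 6902 patch file like the one above is typically referenced from the webhook kustomization. A sketch of such a reference; the target resource name and patch file name here are assumptions, not taken from this diff:

```yaml
# kustomization.yaml fragment (illustrative only)
patchesJson6902:
  - target:
      group: admissionregistration.k8s.io
      version: v1
      kind: ValidatingWebhookConfiguration
      name: validating-webhook-configuration
    path: webhook_patch.yaml
```
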
@@ -1,7 +1,7 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package rbac
package config

import (
"context"
@@ -25,8 +25,8 @@ type Manager struct {
}

// InjectClient injects the Client interface, required by the Runnable interface
func (r *Manager) InjectClient(c client.Client) error {
r.Client = c
func (c *Manager) InjectClient(client client.Client) error {
c.Client = client

return nil
}
@@ -52,22 +52,22 @@ func forOptionPerInstanceName(instanceName string) builder.ForOption {
})
}

func (r *Manager) SetupWithManager(mgr ctrl.Manager, configurationName string) error {
func (c *Manager) SetupWithManager(mgr ctrl.Manager, configurationName string) error {
return ctrl.NewControllerManagedBy(mgr).
For(&capsulev1alpha1.CapsuleConfiguration{}, forOptionPerInstanceName(configurationName)).
Complete(r)
Complete(c)
}

func (r *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, err error) {
r.Log.Info("CapsuleConfiguration reconciliation started", "request.name", request.Name)
func (c *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, err error) {
c.Log.Info("CapsuleConfiguration reconciliation started", "request.name", request.Name)

cfg := configuration.NewCapsuleConfiguration(r.Client, request.Name)
cfg := configuration.NewCapsuleConfiguration(c.Client, request.Name)
// Validating the Capsule Configuration options
if _, err = cfg.ProtectedNamespaceRegexp(); err != nil {
panic(errors.Wrap(err, "Invalid configuration for protected Namespace regex"))
}

r.Log.Info("CapsuleConfiguration reconciliation finished", "request.name", request.Name)
c.Log.Info("CapsuleConfiguration reconciliation finished", "request.name", request.Name)

return
}

@@ -35,7 +35,7 @@ var (
{
APIGroups: []string{""},
Resources: []string{"namespaces"},
Verbs: []string{"delete"},
Verbs: []string{"delete", "patch"},
},
},
},

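For illustration, the added "patch" verb ends up in the provisioned ClusterRole roughly as follows. This is a sketch of the resulting rule, not a verbatim excerpt of the generated manifest:

```yaml
rules:
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["delete", "patch"]
```
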
@@ -24,18 +24,20 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"

"github.com/clastix/capsule/pkg/cert"
"github.com/clastix/capsule/pkg/configuration"
)

type CAReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
Namespace string
Log logr.Logger
Scheme *runtime.Scheme
Namespace string
Configuration configuration.Configuration
}

func (r *CAReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&corev1.Secret{}, forOptionPerInstanceName(caSecretName)).
For(&corev1.Secret{}).
Complete(r)
}

@@ -78,7 +80,7 @@ func (r *CAReconciler) UpdateCustomResourceDefinition(caBundle []byte) error {
func (r CAReconciler) UpdateValidatingWebhookConfiguration(caBundle []byte) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
vw := &admissionregistrationv1.ValidatingWebhookConfiguration{}
err = r.Get(context.TODO(), types.NamespacedName{Name: "capsule-validating-webhook-configuration"}, vw)
err = r.Get(context.TODO(), types.NamespacedName{Name: r.Configuration.ValidatingWebhookConfigurationName()}, vw)
if err != nil {
r.Log.Error(err, "cannot retrieve ValidatingWebhookConfiguration")
return err
@@ -97,7 +99,7 @@ func (r CAReconciler) UpdateValidatingWebhookConfiguration(caBundle []byte) erro
func (r CAReconciler) UpdateMutatingWebhookConfiguration(caBundle []byte) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
mw := &admissionregistrationv1.MutatingWebhookConfiguration{}
err = r.Get(context.TODO(), types.NamespacedName{Name: "capsule-mutating-webhook-configuration"}, mw)
err = r.Get(context.TODO(), types.NamespacedName{Name: r.Configuration.MutatingWebhookConfigurationName()}, mw)
if err != nil {
r.Log.Error(err, "cannot retrieve MutatingWebhookConfiguration")
return err
@@ -115,6 +117,10 @@ func (r CAReconciler) UpdateMutatingWebhookConfiguration(caBundle []byte) error
func (r CAReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
var err error

if request.Name != r.Configuration.CASecretName() {
return ctrl.Result{}, nil
}

r.Log = r.Log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
r.Log.Info("Reconciling CA Secret")

@@ -128,7 +134,7 @@ func (r CAReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl

var ca cert.CA
var rq time.Duration
ca, err = getCertificateAuthority(r.Client, r.Namespace)
ca, err = getCertificateAuthority(r.Client, r.Namespace, r.Configuration.CASecretName())
if err != nil && errors.Is(err, MissingCaError{}) {
ca, err = cert.GenerateCertificateAuthority()
if err != nil {
@@ -157,7 +163,7 @@ func (r CAReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl
privateKeySecretKey: key.Bytes(),
}

group := errgroup.Group{}
group := new(errgroup.Group)
group.Go(func() error {
return r.UpdateMutatingWebhookConfiguration(crt.Bytes())
})
@@ -189,7 +195,7 @@ func (r CAReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl
tls := &corev1.Secret{}
err = r.Get(ctx, types.NamespacedName{
Namespace: r.Namespace,
Name: tlsSecretName,
Name: r.Configuration.TLSSecretName(),
}, tls)
if err != nil {
r.Log.Error(err, "Capsule TLS Secret missing")

@@ -6,7 +6,4 @@ package secret
|
||||
const (
|
||||
certSecretKey = "tls.crt"
|
||||
privateKeySecretKey = "tls.key"
|
||||
|
||||
caSecretName = "capsule-ca"
|
||||
tlsSecretName = "capsule-tls"
|
||||
)
|
||||
|
||||
@@ -9,23 +9,20 @@ import (
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
|
||||
"github.com/clastix/capsule/pkg/cert"
|
||||
)
|
||||
|
||||
func getCertificateAuthority(client client.Client, namespace string) (ca cert.CA, err error) {
|
||||
func getCertificateAuthority(client client.Client, namespace, name string) (ca cert.CA, err error) {
|
||||
instance := &corev1.Secret{}
|
||||
|
||||
err = client.Get(context.TODO(), types.NamespacedName{
|
||||
Namespace: namespace,
|
||||
Name: caSecretName,
|
||||
Name: name,
|
||||
}, instance)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("missing secret %s, cannot reconcile", caSecretName)
|
||||
return nil, fmt.Errorf("missing secret %s, cannot reconcile", name)
|
||||
}
|
||||
|
||||
if instance.Data == nil {
|
||||
@@ -39,24 +36,3 @@ func getCertificateAuthority(client client.Client, namespace string) (ca cert.CA
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func forOptionPerInstanceName(instanceName string) builder.ForOption {
|
||||
return builder.WithPredicates(predicate.Funcs{
|
||||
CreateFunc: func(event event.CreateEvent) bool {
|
||||
return filterByName(event.Object.GetName(), instanceName)
|
||||
},
|
||||
DeleteFunc: func(deleteEvent event.DeleteEvent) bool {
|
||||
return filterByName(deleteEvent.Object.GetName(), instanceName)
|
||||
},
|
||||
UpdateFunc: func(updateEvent event.UpdateEvent) bool {
|
||||
return filterByName(updateEvent.ObjectNew.GetName(), instanceName)
|
||||
},
|
||||
GenericFunc: func(genericEvent event.GenericEvent) bool {
|
||||
return filterByName(genericEvent.Object.GetName(), instanceName)
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func filterByName(objName, desired string) bool {
|
||||
return objName == desired
|
||||
}
|
||||
|
||||
@@ -9,36 +9,43 @@ import (
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"syscall"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
"github.com/clastix/capsule/pkg/cert"
|
||||
"github.com/clastix/capsule/pkg/configuration"
|
||||
)
|
||||
|
||||
type TLSReconciler struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Scheme *runtime.Scheme
|
||||
Namespace string
|
||||
Log logr.Logger
|
||||
Scheme *runtime.Scheme
|
||||
Namespace string
|
||||
Configuration configuration.Configuration
|
||||
}
|
||||
|
||||
func (r *TLSReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&corev1.Secret{}, forOptionPerInstanceName(tlsSecretName)).
|
||||
For(&corev1.Secret{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r TLSReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
|
||||
var err error
|
||||
|
||||
if request.Name != r.Configuration.TLSSecretName() {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
r.Log = r.Log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
|
||||
r.Log.Info("Reconciling TLS Secret")
|
||||
|
||||
@@ -53,7 +60,7 @@ func (r TLSReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctr
|
||||
var ca cert.CA
|
||||
var rq time.Duration
|
||||
|
||||
ca, err = getCertificateAuthority(r.Client, r.Namespace)
|
||||
ca, err = getCertificateAuthority(r.Client, r.Namespace, r.Configuration.CASecretName())
|
||||
if err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
@@ -111,9 +118,39 @@ func (r TLSReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctr
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
if instance.Name == tlsSecretName && res == controllerutil.OperationResultUpdated {
|
||||
r.Log.Info("Capsule TLS certificates has been updated, we need to restart the Controller")
|
||||
_ = syscall.Kill(syscall.Getpid(), syscall.SIGINT)
|
||||
if instance.Name == r.Configuration.TLSSecretName() && res == controllerutil.OperationResultUpdated {
|
||||
r.Log.Info("Capsule TLS certificates has been updated, Controller pods must be restarted to load new certificate")
|
||||
|
||||
hostname, _ := os.Hostname()
|
||||
leaderPod := &corev1.Pod{}
|
||||
if err = r.Client.Get(ctx, types.NamespacedName{Namespace: os.Getenv("NAMESPACE"), Name: hostname}, leaderPod); err != nil {
|
||||
r.Log.Error(err, "cannot retrieve the leader Pod, probably running in out of the cluster mode")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
podList := &corev1.PodList{}
|
||||
if err = r.Client.List(ctx, podList, client.MatchingLabels(leaderPod.ObjectMeta.Labels)); err != nil {
|
||||
r.Log.Error(err, "cannot retrieve list of Capsule pods requiring restart upon TLS update")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
for _, p := range podList.Items {
|
||||
nonLeaderPod := p
|
||||
// Skipping the leader Pod here: it must be deleted last
|
||||
if nonLeaderPod.GetName() == leaderPod.GetName() {
|
||||
continue
|
||||
}
|
||||
|
||||
if err = r.Client.Delete(ctx, &nonLeaderPod); err != nil {
|
||||
r.Log.Error(err, "cannot delete the non-leader Pod due to TLS update")
|
||||
}
|
||||
}
|
||||
|
||||
if err = r.Client.Delete(ctx, leaderPod); err != nil {
|
||||
r.Log.Error(err, "cannot delete the leader Pod due to TLS update")
|
||||
}
|
||||
}
|
||||
|
||||
r.Log.Info("Reconciliation completed, processing back in " + rq.String())
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
@@ -49,12 +50,15 @@ func (r *abstractServiceLabelsReconciler) Reconcile(ctx context.Context, request
|
||||
|
||||
err = r.client.Get(ctx, request.NamespacedName, r.obj)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
_, err = controllerutil.CreateOrUpdate(ctx, r.client, r.obj, func() (err error) {
|
||||
r.obj.SetLabels(r.sync(r.obj.GetLabels(), tenant.Spec.ServiceOptions.AdditionalMetadata.AdditionalLabels))
|
||||
r.obj.SetAnnotations(r.sync(r.obj.GetAnnotations(), tenant.Spec.ServiceOptions.AdditionalMetadata.AdditionalAnnotations))
|
||||
r.obj.SetLabels(r.sync(r.obj.GetLabels(), tenant.Spec.ServiceOptions.AdditionalMetadata.Labels))
|
||||
r.obj.SetAnnotations(r.sync(r.obj.GetAnnotations(), tenant.Spec.ServiceOptions.AdditionalMetadata.Annotations))
|
||||
return nil
|
||||
})
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ package servicelabels
|
||||
|
||||
import (
|
||||
"github.com/go-logr/logr"
|
||||
discoveryv1 "k8s.io/api/discovery/v1"
|
||||
discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
)
|
||||
@@ -13,8 +14,8 @@ type EndpointSlicesLabelsReconciler struct {
|
||||
abstractServiceLabelsReconciler
|
||||
|
||||
Log logr.Logger
|
||||
VersionMinor int
|
||||
VersionMajor int
|
||||
VersionMinor uint
|
||||
VersionMajor uint
|
||||
}
|
||||
|
||||
func (r *EndpointSlicesLabelsReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
@@ -24,12 +25,16 @@ func (r *EndpointSlicesLabelsReconciler) SetupWithManager(mgr ctrl.Manager) erro
|
||||
log: r.Log,
|
||||
}
|
||||
|
||||
if r.VersionMajor == 1 && r.VersionMinor <= 16 {
|
||||
switch {
|
||||
case r.VersionMajor == 1 && r.VersionMinor <= 16:
|
||||
r.Log.Info("Skipping controller setup, as EndpointSlices are not supported on current kubernetes version", "VersionMajor", r.VersionMajor, "VersionMinor", r.VersionMinor)
|
||||
return nil
|
||||
case r.VersionMajor == 1 && r.VersionMinor >= 21:
|
||||
r.abstractServiceLabelsReconciler.obj = &discoveryv1.EndpointSlice{}
|
||||
default:
|
||||
r.abstractServiceLabelsReconciler.obj = &discoveryv1beta1.EndpointSlice{}
|
||||
}
|
||||
|
||||
r.abstractServiceLabelsReconciler.obj = &discoveryv1beta1.EndpointSlice{}
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(r.obj, r.abstractServiceLabelsReconciler.forOptionPerInstanceName()).
|
||||
Complete(r)
|
||||
|
||||
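For context: the Major/Minor version pair consumed by EndpointSlicesLabelsReconciler above has to come from the API server. A minimal sketch, assuming client-go's discovery package and controller-runtime's GetConfigOrDie; it is illustrative only and not part of this change set.

package main

import (
	"fmt"
	"strconv"
	"strings"

	"k8s.io/client-go/discovery"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	// Sketch only, not Capsule code: fetching the server version that drives the
	// EndpointSlice API group selection (discovery.k8s.io/v1 versus v1beta1).
	dc, err := discovery.NewDiscoveryClientForConfig(ctrl.GetConfigOrDie())
	if err != nil {
		panic(err)
	}

	info, err := dc.ServerVersion()
	if err != nil {
		panic(err)
	}

	// Some distributions report values such as "21+", hence the trim before parsing.
	major, _ := strconv.ParseUint(strings.TrimSuffix(info.Major, "+"), 10, 32)
	minor, _ := strconv.ParseUint(strings.TrimSuffix(info.Minor, "+"), 10, 32)

	fmt.Printf("VersionMajor=%d VersionMinor=%d\n", major, minor)
}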
controllers/tenant/limitranges.go (new file, 80 lines)
@@ -0,0 +1,80 @@
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
|
||||
// Ensuring all the LimitRanges are applied to each Namespace handled by the Tenant.
|
||||
func (r *Manager) syncLimitRanges(tenant *capsulev1beta1.Tenant) error {
|
||||
// getting requested LimitRange keys
|
||||
keys := make([]string, 0, len(tenant.Spec.LimitRanges.Items))
|
||||
|
||||
for i := range tenant.Spec.LimitRanges.Items {
|
||||
keys = append(keys, strconv.Itoa(i))
|
||||
}
|
||||
|
||||
group := new(errgroup.Group)
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
namespace := ns
|
||||
|
||||
group.Go(func() error {
|
||||
return r.syncLimitRange(tenant, namespace, keys)
|
||||
})
|
||||
}
|
||||
|
||||
return group.Wait()
|
||||
}
|
||||
|
||||
func (r *Manager) syncLimitRange(tenant *capsulev1beta1.Tenant, namespace string, keys []string) (err error) {
|
||||
// getting LimitRange labels for the mutateFn
|
||||
var tenantLabel, limitRangeLabel string
|
||||
|
||||
if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
|
||||
return
|
||||
}
|
||||
if limitRangeLabel, err = capsulev1beta1.GetTypeLabel(&corev1.LimitRange{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = r.pruningResources(namespace, keys, &corev1.LimitRange{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for i, spec := range tenant.Spec.LimitRanges.Items {
|
||||
target := &corev1.LimitRange{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d", tenant.Name, i),
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
|
||||
var res controllerutil.OperationResult
|
||||
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
|
||||
target.ObjectMeta.Labels = map[string]string{
|
||||
tenantLabel: tenant.Name,
|
||||
limitRangeLabel: strconv.Itoa(i),
|
||||
}
|
||||
target.Spec = spec
|
||||
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring LimitRange %s", target.GetName()), err)
|
||||
|
||||
r.Log.Info("LimitRange sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
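The per-Namespace sync functions in this file, and in the files that follow, all share the same errgroup fan-out shape: one goroutine per Namespace, with Wait surfacing the first error. A minimal standalone sketch of that pattern, using made-up namespace names:

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	// Example values only; in Capsule these come from tenant.Status.Namespaces.
	namespaces := []string{"oil-dev", "oil-staging", "oil-prod"}

	group := new(errgroup.Group)

	for _, ns := range namespaces {
		namespace := ns // capture the loop variable for the closure

		group.Go(func() error {
			fmt.Println("syncing", namespace)
			return nil
		})
	}

	if err := group.Wait(); err != nil {
		fmt.Println("sync failed:", err)
	}
}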
controllers/tenant/manager.go (new file, 129 lines)
@@ -0,0 +1,129 @@
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/retry"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
|
||||
type Manager struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Scheme *runtime.Scheme
|
||||
Recorder record.EventRecorder
|
||||
RESTConfig *rest.Config
|
||||
}
|
||||
|
||||
func (r *Manager) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&capsulev1beta1.Tenant{}).
|
||||
Owns(&corev1.Namespace{}).
|
||||
Owns(&networkingv1.NetworkPolicy{}).
|
||||
Owns(&corev1.LimitRange{}).
|
||||
Owns(&corev1.ResourceQuota{}).
|
||||
Owns(&rbacv1.RoleBinding{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r Manager) Reconcile(ctx context.Context, request ctrl.Request) (result ctrl.Result, err error) {
|
||||
r.Log = r.Log.WithValues("Request.Name", request.Name)
|
||||
|
||||
// Fetch the Tenant instance
|
||||
instance := &capsulev1beta1.Tenant{}
|
||||
if err = r.Get(ctx, request.NamespacedName, instance); err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
r.Log.Info("Request object not found, could have been deleted after reconcile request")
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
r.Log.Error(err, "Error reading the object")
|
||||
return
|
||||
}
|
||||
// Ensuring the Tenant Status
|
||||
if err = r.updateTenantStatus(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot update Tenant status")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring limit resources count is updated")
|
||||
if err = r.syncCustomResourceQuotaUsages(ctx, instance); err != nil {
|
||||
r.Log.Error(err, "Cannot count limited resources")
|
||||
return
|
||||
}
|
||||
|
||||
// Ensuring all namespaces are collected
|
||||
r.Log.Info("Ensuring all Namespaces are collected")
|
||||
if err = r.collectNamespaces(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot collect Namespace resources")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Starting processing of Namespaces", "items", len(instance.Status.Namespaces))
|
||||
if err = r.syncNamespaces(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespace items")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Starting processing of Network Policies")
|
||||
if err = r.syncNetworkPolicies(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync NetworkPolicy items")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Starting processing of Limit Ranges", "items", len(instance.Spec.LimitRanges.Items))
|
||||
if err = r.syncLimitRanges(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync LimitRange items")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Starting processing of Resource Quotas", "items", len(instance.Spec.ResourceQuota.Items))
|
||||
if err = r.syncResourceQuotas(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync ResourceQuota items")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring additional RoleBindings for owner")
|
||||
if err = r.syncAdditionalRoleBindings(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync additional RoleBindings items")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring RoleBinding for owner")
|
||||
if err = r.ownerRoleBinding(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync owner RoleBinding")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring Namespace count")
|
||||
if err = r.ensureNamespaceCount(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespace count")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Tenant reconciling completed")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
func (r *Manager) updateTenantStatus(tnt *capsulev1beta1.Tenant) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
|
||||
if tnt.IsCordoned() {
|
||||
tnt.Status.State = capsulev1beta1.TenantStateCordoned
|
||||
} else {
|
||||
tnt.Status.State = capsulev1beta1.TenantStateActive
|
||||
}
|
||||
|
||||
return r.Client.Status().Update(context.Background(), tnt)
|
||||
})
|
||||
}
|
||||
controllers/tenant/namespaces.go (new file, 183 lines)
@@ -0,0 +1,183 @@
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
|
||||
// Ensuring all the required metadata (labels and annotations) is applied to each Namespace handled by the Tenant.
|
||||
func (r *Manager) syncNamespaces(tenant *capsulev1beta1.Tenant) (err error) {
|
||||
group := new(errgroup.Group)
|
||||
|
||||
for _, item := range tenant.Status.Namespaces {
|
||||
namespace := item
|
||||
|
||||
group.Go(func() error {
|
||||
return r.syncNamespaceMetadata(namespace, tenant)
|
||||
})
|
||||
}
|
||||
|
||||
if err = group.Wait(); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespaces")
|
||||
|
||||
err = fmt.Errorf("cannot sync Namespaces: %s", err.Error())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *Manager) syncNamespaceMetadata(namespace string, tnt *capsulev1beta1.Tenant) (err error) {
|
||||
var res controllerutil.OperationResult
|
||||
|
||||
err = retry.RetryOnConflict(retry.DefaultBackoff, func() (conflictErr error) {
|
||||
ns := &corev1.Namespace{}
|
||||
if conflictErr = r.Client.Get(context.TODO(), types.NamespacedName{Name: namespace}, ns); conflictErr != nil {
|
||||
return
|
||||
}
|
||||
|
||||
capsuleLabel, _ := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
|
||||
res, conflictErr = controllerutil.CreateOrUpdate(context.TODO(), r.Client, ns, func() error {
|
||||
annotations := make(map[string]string)
|
||||
labels := map[string]string{
|
||||
"name": namespace,
|
||||
capsuleLabel: tnt.GetName(),
|
||||
}
|
||||
|
||||
if tnt.Spec.NamespaceOptions != nil && tnt.Spec.NamespaceOptions.AdditionalMetadata != nil {
|
||||
for k, v := range tnt.Spec.NamespaceOptions.AdditionalMetadata.Annotations {
|
||||
annotations[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if tnt.Spec.NamespaceOptions != nil && tnt.Spec.NamespaceOptions.AdditionalMetadata != nil {
|
||||
for k, v := range tnt.Spec.NamespaceOptions.AdditionalMetadata.Labels {
|
||||
labels[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if tnt.Spec.NodeSelector != nil {
|
||||
var selector []string
|
||||
for k, v := range tnt.Spec.NodeSelector {
|
||||
selector = append(selector, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
annotations["scheduler.alpha.kubernetes.io/node-selector"] = strings.Join(selector, ",")
|
||||
}
|
||||
|
||||
if tnt.Spec.IngressOptions.AllowedClasses != nil {
|
||||
if len(tnt.Spec.IngressOptions.AllowedClasses.Exact) > 0 {
|
||||
annotations[capsulev1beta1.AvailableIngressClassesAnnotation] = strings.Join(tnt.Spec.IngressOptions.AllowedClasses.Exact, ",")
|
||||
}
|
||||
if len(tnt.Spec.IngressOptions.AllowedClasses.Regex) > 0 {
|
||||
annotations[capsulev1beta1.AvailableIngressClassesRegexpAnnotation] = tnt.Spec.IngressOptions.AllowedClasses.Regex
|
||||
}
|
||||
}
|
||||
|
||||
if tnt.Spec.StorageClasses != nil {
|
||||
if len(tnt.Spec.StorageClasses.Exact) > 0 {
|
||||
annotations[capsulev1beta1.AvailableStorageClassesAnnotation] = strings.Join(tnt.Spec.StorageClasses.Exact, ",")
|
||||
}
|
||||
if len(tnt.Spec.StorageClasses.Regex) > 0 {
|
||||
annotations[capsulev1beta1.AvailableStorageClassesRegexpAnnotation] = tnt.Spec.StorageClasses.Regex
|
||||
}
|
||||
}
|
||||
|
||||
if tnt.Spec.ContainerRegistries != nil {
|
||||
if len(tnt.Spec.ContainerRegistries.Exact) > 0 {
|
||||
annotations[capsulev1beta1.AllowedRegistriesAnnotation] = strings.Join(tnt.Spec.ContainerRegistries.Exact, ",")
|
||||
}
|
||||
if len(tnt.Spec.ContainerRegistries.Regex) > 0 {
|
||||
annotations[capsulev1beta1.AllowedRegistriesRegexpAnnotation] = tnt.Spec.ContainerRegistries.Regex
|
||||
}
|
||||
}
|
||||
|
||||
if value, ok := tnt.Annotations[capsulev1beta1.ForbiddenNamespaceLabelsAnnotation]; ok {
|
||||
annotations[capsulev1beta1.ForbiddenNamespaceLabelsAnnotation] = value
|
||||
}
|
||||
|
||||
if value, ok := tnt.Annotations[capsulev1beta1.ForbiddenNamespaceLabelsRegexpAnnotation]; ok {
|
||||
annotations[capsulev1beta1.ForbiddenNamespaceLabelsRegexpAnnotation] = value
|
||||
}
|
||||
|
||||
if value, ok := tnt.Annotations[capsulev1beta1.ForbiddenNamespaceAnnotationsAnnotation]; ok {
|
||||
annotations[capsulev1beta1.ForbiddenNamespaceAnnotationsAnnotation] = value
|
||||
}
|
||||
|
||||
if value, ok := tnt.Annotations[capsulev1beta1.ForbiddenNamespaceAnnotationsRegexpAnnotation]; ok {
|
||||
annotations[capsulev1beta1.ForbiddenNamespaceAnnotationsRegexpAnnotation] = value
|
||||
}
|
||||
|
||||
if ns.Annotations == nil {
|
||||
ns.SetAnnotations(annotations)
|
||||
} else {
|
||||
for k, v := range annotations {
|
||||
ns.Annotations[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if ns.Labels == nil {
|
||||
ns.SetLabels(labels)
|
||||
} else {
|
||||
for k, v := range labels {
|
||||
ns.Labels[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return
|
||||
})
|
||||
|
||||
r.emitEvent(tnt, namespace, res, "Ensuring Namespace metadata", err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (r *Manager) ensureNamespaceCount(tenant *capsulev1beta1.Tenant) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
tenant.Status.Size = uint(len(tenant.Status.Namespaces))
|
||||
|
||||
found := &capsulev1beta1.Tenant{}
|
||||
if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: tenant.GetName()}, found); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
found.Status.Size = tenant.Status.Size
|
||||
|
||||
return r.Client.Status().Update(context.TODO(), found, &client.UpdateOptions{})
|
||||
})
|
||||
}
|
||||
|
||||
func (r *Manager) collectNamespaces(tenant *capsulev1beta1.Tenant) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
|
||||
list := &corev1.NamespaceList{}
|
||||
err = r.Client.List(context.TODO(), list, client.MatchingFieldsSelector{
|
||||
Selector: fields.OneTermEqualSelector(".metadata.ownerReferences[*].capsule", tenant.GetName()),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, tenant.DeepCopy(), func() error {
|
||||
tenant.AssignNamespaces(list.Items)
|
||||
|
||||
return r.Client.Status().Update(context.TODO(), tenant, &client.UpdateOptions{})
|
||||
})
|
||||
return
|
||||
})
|
||||
}
|
||||
controllers/tenant/networkpolicies.go (new file, 82 lines)
@@ -0,0 +1,82 @@
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
|
||||
// Ensuring all the NetworkPolicies are applied to each Namespace handled by the Tenant.
|
||||
func (r *Manager) syncNetworkPolicies(tenant *capsulev1beta1.Tenant) error {
|
||||
// getting requested NetworkPolicy keys
|
||||
keys := make([]string, 0, len(tenant.Spec.NetworkPolicies.Items))
|
||||
|
||||
for i := range tenant.Spec.NetworkPolicies.Items {
|
||||
keys = append(keys, strconv.Itoa(i))
|
||||
}
|
||||
|
||||
group := new(errgroup.Group)
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
namespace := ns
|
||||
|
||||
group.Go(func() error {
|
||||
return r.syncNetworkPolicy(tenant, namespace, keys)
|
||||
})
|
||||
}
|
||||
|
||||
return group.Wait()
|
||||
}
|
||||
|
||||
func (r *Manager) syncNetworkPolicy(tenant *capsulev1beta1.Tenant, namespace string, keys []string) (err error) {
|
||||
if err = r.pruningResources(namespace, keys, &networkingv1.NetworkPolicy{}); err != nil {
|
||||
return
|
||||
}
|
||||
// getting NetworkPolicy labels for the mutateFn
|
||||
var tenantLabel, networkPolicyLabel string
|
||||
|
||||
if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if networkPolicyLabel, err = capsulev1beta1.GetTypeLabel(&networkingv1.NetworkPolicy{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for i, spec := range tenant.Spec.NetworkPolicies.Items {
|
||||
target := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d", tenant.Name, i),
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
|
||||
var res controllerutil.OperationResult
|
||||
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
|
||||
target.SetLabels(map[string]string{
|
||||
tenantLabel: tenant.Name,
|
||||
networkPolicyLabel: strconv.Itoa(i),
|
||||
})
|
||||
target.Spec = spec
|
||||
|
||||
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring NetworkPolicy %s", target.GetName()), err)
|
||||
|
||||
r.Log.Info("Network Policy sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
controllers/tenant/resourcequotas.go (new file, 251 lines)
@@ -0,0 +1,251 @@
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
|
||||
// When the Resource Budget assigned to a Tenant is Tenant-scoped, we have to rely on ResourceQuota resources to
// represent the quota of the whole Tenant rather than of a single Namespace, even though the API is Namespaced.
//
// Since any single Namespace could take up the whole available budget, each Namespace ResourceQuota is a 1:1 mapping
// of the Tenant one: Capsule first sums the analogous ResourceQuota resources across the other Tenant Namespaces to
// check whether the Tenant quota has been exceeded, then reuses the native Kubernetes enforcement by setting the
// .Status.Used value as the .Spec.Hard value.
// This triggers further reconciliations, which is fine: the mutateFn reuses the same business logic, and together with
// CreateOrUpdate it skips the update when the resources are identical.
//
// In case of a Namespace-scoped Resource Budget, the resources are simply replicated across all registered Namespaces.
|
||||
func (r *Manager) syncResourceQuotas(tenant *capsulev1beta1.Tenant) (err error) {
|
||||
// getting ResourceQuota labels for the mutateFn
|
||||
var tenantLabel, typeLabel string
|
||||
|
||||
if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if typeLabel, err = capsulev1beta1.GetTypeLabel(&corev1.ResourceQuota{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if tenant.Spec.ResourceQuota.Scope == capsulev1beta1.ResourceQuotaScopeTenant {
|
||||
group := new(errgroup.Group)
|
||||
|
||||
for i, q := range tenant.Spec.ResourceQuota.Items {
|
||||
index := i
|
||||
|
||||
resourceQuota := q
|
||||
|
||||
group.Go(func() (scopeErr error) {
|
||||
// Calculating the Resource Budget at Tenant scope only when this is put in place.
|
||||
// Requirement to list ResourceQuota of the current Tenant
|
||||
var tntRequirement *labels.Requirement
|
||||
if tntRequirement, scopeErr = labels.NewRequirement(tenantLabel, selection.Equals, []string{tenant.Name}); scopeErr != nil {
|
||||
r.Log.Error(scopeErr, "Cannot build ResourceQuota Tenant requirement")
|
||||
}
|
||||
// Requirement to list ResourceQuota for the current index
|
||||
var indexRequirement *labels.Requirement
|
||||
if indexRequirement, scopeErr = labels.NewRequirement(typeLabel, selection.Equals, []string{strconv.Itoa(index)}); scopeErr != nil {
|
||||
r.Log.Error(scopeErr, "Cannot build ResourceQuota index requirement")
|
||||
}
|
||||
// Listing all the ResourceQuota matching the said requirements: these are needed since Capsule sums
// the used quota across all of them to compute the Tenant-wide usage.
|
||||
list := &corev1.ResourceQuotaList{}
|
||||
if scopeErr = r.List(context.TODO(), list, &client.ListOptions{LabelSelector: labels.NewSelector().Add(*tntRequirement).Add(*indexRequirement)}); scopeErr != nil {
|
||||
r.Log.Error(scopeErr, "Cannot list ResourceQuota", "tenantFilter", tntRequirement.String(), "indexFilter", indexRequirement.String())
|
||||
return
|
||||
}
|
||||
// Iterating over all the options declared for the ResourceQuota, summing the used quota across the
// different Namespaces to determine whether we are hitting a Hard quota at Tenant level.
// In that case, we block the Quota by setting the Hard value to the used one.
|
||||
for name, hardQuota := range resourceQuota.Hard {
|
||||
r.Log.Info("Desired hard " + name.String() + " quota is " + hardQuota.String())
|
||||
|
||||
// Getting the whole usage across all the Tenant Namespaces
|
||||
var quantity resource.Quantity
|
||||
for _, item := range list.Items {
|
||||
quantity.Add(item.Status.Used[name])
|
||||
}
|
||||
r.Log.Info("Computed " + name.String() + " quota for the whole Tenant is " + quantity.String())
|
||||
|
||||
switch quantity.Cmp(resourceQuota.Hard[name]) {
|
||||
case 0:
|
||||
// The Tenant is matching exactly the Quota:
|
||||
// falling through next case since we have to block further
|
||||
// resource allocations.
|
||||
fallthrough
|
||||
case 1:
|
||||
// The Tenant is OverQuota:
|
||||
// updating all the related ResourceQuota with the current
|
||||
// used Quota to block further creations.
|
||||
for item := range list.Items {
|
||||
if _, ok := list.Items[item].Status.Used[name]; ok {
|
||||
list.Items[item].Spec.Hard[name] = list.Items[item].Status.Used[name]
|
||||
} else {
|
||||
um := make(map[corev1.ResourceName]resource.Quantity)
|
||||
um[name] = resource.Quantity{}
|
||||
list.Items[item].Spec.Hard = um
|
||||
}
|
||||
}
|
||||
default:
|
||||
// The Tenant is respecting the Hard quota:
|
||||
// restoring the default one for all the elements,
|
||||
// also for the reconciled one.
|
||||
for item := range list.Items {
|
||||
if list.Items[item].Spec.Hard == nil {
|
||||
list.Items[item].Spec.Hard = map[corev1.ResourceName]resource.Quantity{}
|
||||
}
|
||||
list.Items[item].Spec.Hard[name] = resourceQuota.Hard[name]
|
||||
}
|
||||
}
|
||||
if scopeErr = r.resourceQuotasUpdate(name, quantity, resourceQuota.Hard[name], list.Items...); scopeErr != nil {
|
||||
r.Log.Error(scopeErr, "cannot proceed with outer ResourceQuota")
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
}
|
||||
// Waiting for the update of all ResourceQuotas
|
||||
if err = group.Wait(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
// getting requested ResourceQuota keys
|
||||
keys := make([]string, 0, len(tenant.Spec.ResourceQuota.Items))
|
||||
|
||||
for i := range tenant.Spec.ResourceQuota.Items {
|
||||
keys = append(keys, strconv.Itoa(i))
|
||||
}
|
||||
|
||||
group := new(errgroup.Group)
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
namespace := ns
|
||||
|
||||
group.Go(func() error {
|
||||
return r.syncResourceQuota(tenant, namespace, keys)
|
||||
})
|
||||
}
|
||||
|
||||
return group.Wait()
|
||||
}
|
||||
|
||||
func (r *Manager) syncResourceQuota(tenant *capsulev1beta1.Tenant, namespace string, keys []string) (err error) {
|
||||
// getting ResourceQuota labels for the mutateFn
|
||||
var tenantLabel, typeLabel string
|
||||
|
||||
if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if typeLabel, err = capsulev1beta1.GetTypeLabel(&corev1.ResourceQuota{}); err != nil {
|
||||
return err
|
||||
}
|
||||
// Pruning resource of non-requested resources
|
||||
if err = r.pruningResources(namespace, keys, &corev1.ResourceQuota{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for index, resQuota := range tenant.Spec.ResourceQuota.Items {
|
||||
target := &corev1.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d", tenant.Name, index),
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
|
||||
var res controllerutil.OperationResult
|
||||
err = retry.RetryOnConflict(retry.DefaultBackoff, func() (retryErr error) {
|
||||
res, retryErr = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
|
||||
target.SetLabels(map[string]string{
|
||||
tenantLabel: tenant.Name,
|
||||
typeLabel: strconv.Itoa(index),
|
||||
})
|
||||
target.Spec.Scopes = resQuota.Scopes
|
||||
target.Spec.ScopeSelector = resQuota.ScopeSelector
|
||||
// In case of Namespace scope for the ResourceQuota we can easily apply the bare specification
|
||||
if tenant.Spec.ResourceQuota.Scope == capsulev1beta1.ResourceQuotaScopeNamespace {
|
||||
target.Spec.Hard = resQuota.Hard
|
||||
}
|
||||
|
||||
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
|
||||
})
|
||||
|
||||
return retryErr
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring ResourceQuota %s", target.GetName()), err)
|
||||
|
||||
r.Log.Info("Resource Quota sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Serial ResourceQuota processing is expensive: using goroutines we can speed it up.
// In case of multiple errors, each is logged properly and a generic error is returned, since we have to
// requeue the reconciliation loop anyway.
|
||||
func (r *Manager) resourceQuotasUpdate(resourceName corev1.ResourceName, actual, limit resource.Quantity, list ...corev1.ResourceQuota) (err error) {
|
||||
group := new(errgroup.Group)
|
||||
|
||||
for _, item := range list {
|
||||
rq := item
|
||||
|
||||
group.Go(func() (err error) {
|
||||
found := &corev1.ResourceQuota{}
|
||||
if err = r.Get(context.TODO(), types.NamespacedName{Namespace: rq.Namespace, Name: rq.Name}, found); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() (retryErr error) {
|
||||
_, retryErr = controllerutil.CreateOrUpdate(context.TODO(), r.Client, found, func() error {
|
||||
// Ensuring annotation map is there to avoid uninitialized map error and
|
||||
// assigning the overall usage
|
||||
if found.Annotations == nil {
|
||||
found.Annotations = make(map[string]string)
|
||||
}
|
||||
found.Labels = rq.Labels
|
||||
found.Annotations[capsulev1beta1.UsedQuotaFor(resourceName)] = actual.String()
|
||||
found.Annotations[capsulev1beta1.HardQuotaFor(resourceName)] = limit.String()
|
||||
// Updating the Resource according to the actual.Cmp result
|
||||
found.Spec.Hard = rq.Spec.Hard
|
||||
return nil
|
||||
})
|
||||
|
||||
return retryErr
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
if err = group.Wait(); err != nil {
|
||||
// We had an error and we mark the whole transaction as failed
|
||||
// to process it another time according to the Tenant controller back-off factor.
|
||||
r.Log.Error(err, "Cannot update outer ResourceQuotas", "resourceName", resourceName.String())
|
||||
err = fmt.Errorf("update of outer ResourceQuota items has failed: %s", err.Error())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
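As a worked illustration of the Tenant-scoped branch above (all values made up): with a Tenant hard quota of 10 pods and three Namespaces reporting 4, 4 and 3 used, the aggregated usage of 11 exceeds the hard value, so every Namespace ResourceQuota gets its Spec.Hard frozen at its own Status.Used. A minimal sketch of that decision, outside any Kubernetes API:

package main

import "fmt"

func main() {
	// Illustrative sketch, not Capsule code: the Tenant-wide aggregation behind syncResourceQuotas.
	hard := int64(10)        // Tenant-scoped hard quota for "pods" (assumed value)
	used := []int64{4, 4, 3} // Status.Used reported by each Namespace ResourceQuota

	var total int64
	for _, u := range used {
		total += u
	}

	fmt.Printf("tenant usage %d against hard quota %d\n", total, hard)

	if total >= hard {
		// At or over quota: freeze each Namespace at its current usage so that
		// no further resources can be created anywhere in the Tenant.
		for i, u := range used {
			fmt.Printf("namespace %d: Spec.Hard = %d (frozen at Status.Used)\n", i, u)
		}
	} else {
		// Under quota: restore the declared hard value in every Namespace.
		for i := range used {
			fmt.Printf("namespace %d: Spec.Hard = %d (declared)\n", i, hard)
		}
	}
}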
controllers/tenant/resourcequotas_quota.go (new file, 122 lines)
@@ -0,0 +1,122 @@
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/util/retry"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
|
||||
func (r *Manager) syncCustomResourceQuotaUsages(ctx context.Context, tenant *capsulev1beta1.Tenant) error {
|
||||
type resource struct {
|
||||
kind string
|
||||
group string
|
||||
version string
|
||||
}
|
||||
|
||||
var resourceList []resource
|
||||
|
||||
for k := range tenant.GetAnnotations() {
|
||||
if !strings.HasPrefix(k, capsulev1beta1.ResourceQuotaAnnotationPrefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
parts := strings.Split(k, "/")
|
||||
if len(parts) != 2 {
|
||||
r.Log.Info("non well-formed Resource Limit annotation", "key", k)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
parts = strings.Split(parts[1], "_")
|
||||
|
||||
if len(parts) != 2 {
|
||||
r.Log.Info("non well-formed Resource Limit annotation, cannot retrieve version", "key", k)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
groupKindParts := strings.Split(parts[0], ".")
|
||||
if len(groupKindParts) < 2 {
|
||||
r.Log.Info("non well-formed Resource Limit annotation, cannot retrieve kind and group", "key", k)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
resourceList = append(resourceList, resource{
|
||||
kind: groupKindParts[0],
|
||||
group: strings.Join(groupKindParts[1:], "."),
|
||||
version: parts[1],
|
||||
})
|
||||
}
|
||||
|
||||
errGroup := new(errgroup.Group)
|
||||
|
||||
usedMap := make(map[string]int)
|
||||
|
||||
defer func() {
|
||||
for gvk, used := range usedMap {
|
||||
err := retry.RetryOnConflict(retry.DefaultBackoff, func() (retryErr error) {
|
||||
tnt := &capsulev1beta1.Tenant{}
|
||||
if retryErr = r.Client.Get(ctx, types.NamespacedName{Name: tenant.GetName()}, tnt); retryErr != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if tnt.GetAnnotations() == nil {
|
||||
tnt.Annotations = make(map[string]string)
|
||||
}
|
||||
|
||||
tnt.Annotations[capsulev1beta1.UsedAnnotationForResource(gvk)] = fmt.Sprintf("%d", used)
|
||||
|
||||
return r.Client.Update(ctx, tnt)
|
||||
})
|
||||
if err != nil {
|
||||
r.Log.Error(err, "cannot update custom Resource Quota", "GVK", gvk)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for _, item := range resourceList {
|
||||
res := item
|
||||
|
||||
errGroup.Go(func() (scopeErr error) {
|
||||
dynamicClient := dynamic.NewForConfigOrDie(r.RESTConfig)
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
var list *unstructured.UnstructuredList
|
||||
|
||||
list, scopeErr = dynamicClient.Resource(schema.GroupVersionResource{Group: res.group, Version: res.version, Resource: res.kind}).List(ctx, metav1.ListOptions{
|
||||
FieldSelector: fmt.Sprintf("metadata.namespace==%s", ns),
|
||||
})
|
||||
if scopeErr != nil {
|
||||
return scopeErr
|
||||
}
|
||||
|
||||
key := fmt.Sprintf("%s.%s_%s", res.kind, res.group, res.version)
|
||||
|
||||
if _, ok := usedMap[key]; !ok {
|
||||
usedMap[key] = 0
|
||||
}
|
||||
|
||||
usedMap[key] += len(list.Items)
|
||||
}
|
||||
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
if err := errGroup.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
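The loop above derives a GroupVersionResource from every Tenant annotation key carrying the resource-quota prefix. A minimal sketch of the key layout it implies; the concrete prefix shown here is an assumption for illustration, the real one is capsulev1beta1.ResourceQuotaAnnotationPrefix:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assumed layout: <prefix>/<kind>.<group>_<version>; the prefix and values are examples only.
	key := "quota.resources.capsule.clastix.io/foos.example.com_v1alpha1"

	resourceAndVersion := strings.Split(strings.Split(key, "/")[1], "_")
	groupKind := strings.Split(resourceAndVersion[0], ".")

	kind := groupKind[0]
	group := strings.Join(groupKind[1:], ".")
	version := resourceAndVersion[1]

	fmt.Println(kind, group, version) // foos example.com v1alpha1
}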
controllers/tenant/rolebindings.go (new file, 179 lines)
@@ -0,0 +1,179 @@
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
"github.com/clastix/capsule/controllers/rbac"
|
||||
)
|
||||
|
||||
// Additional Role Bindings can be used in many ways: applying Pod Security Policies or giving
|
||||
// access to CRDs or specific API groups.
|
||||
func (r *Manager) syncAdditionalRoleBindings(tenant *capsulev1beta1.Tenant) (err error) {
|
||||
// hashing the RoleBinding definition, since Kubernetes label values must comply with the RFC 1123 (DNS) constraints
|
||||
hashFn := func(binding capsulev1beta1.AdditionalRoleBindingsSpec) string {
|
||||
h := fnv.New64a()
|
||||
|
||||
_, _ = h.Write([]byte(binding.ClusterRoleName))
|
||||
|
||||
for _, sub := range binding.Subjects {
|
||||
_, _ = h.Write([]byte(sub.Kind + sub.Name))
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%x", h.Sum64())
|
||||
}
|
||||
// getting requested Role Binding keys
|
||||
var keys []string
|
||||
for _, i := range tenant.Spec.AdditionalRoleBindings {
|
||||
keys = append(keys, hashFn(i))
|
||||
}
|
||||
|
||||
group := new(errgroup.Group)
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
namespace := ns
|
||||
|
||||
group.Go(func() error {
|
||||
return r.syncAdditionalRoleBinding(tenant, namespace, keys, hashFn)
|
||||
})
|
||||
}
|
||||
|
||||
return group.Wait()
|
||||
}
|
||||
|
||||
func (r *Manager) syncAdditionalRoleBinding(tenant *capsulev1beta1.Tenant, ns string, keys []string, hashFn func(binding capsulev1beta1.AdditionalRoleBindingsSpec) string) (err error) {
|
||||
var tenantLabel, roleBindingLabel string
|
||||
|
||||
if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if roleBindingLabel, err = capsulev1beta1.GetTypeLabel(&rbacv1.RoleBinding{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = r.pruningResources(ns, keys, &rbacv1.RoleBinding{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for i, roleBinding := range tenant.Spec.AdditionalRoleBindings {
|
||||
roleBindingHashLabel := hashFn(roleBinding)
|
||||
|
||||
target := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d-%s", tenant.Name, i, roleBinding.ClusterRoleName),
|
||||
Namespace: ns,
|
||||
},
|
||||
}
|
||||
|
||||
var res controllerutil.OperationResult
|
||||
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() error {
|
||||
target.ObjectMeta.Labels = map[string]string{
|
||||
tenantLabel: tenant.Name,
|
||||
roleBindingLabel: roleBindingHashLabel,
|
||||
}
|
||||
target.RoleRef = rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: roleBinding.ClusterRoleName,
|
||||
}
|
||||
target.Subjects = roleBinding.Subjects
|
||||
|
||||
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring additional RoleBinding %s", target.GetName()), err)
|
||||
|
||||
if err != nil {
|
||||
r.Log.Error(err, "Cannot sync Additional RoleBinding")
|
||||
}
|
||||
r.Log.Info(fmt.Sprintf("Additional RoleBindings sync result: %s", string(res)), "name", target.Name, "namespace", target.Namespace)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
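To see why the hash used by syncAdditionalRoleBindings fits into a label: a 64-bit FNV-1a digest printed as hex is at most 16 characters, well within the 63-character limit on label values. A standalone sketch with example inputs:

package main

import (
	"fmt"
	"hash/fnv"
)

func main() {
	// Example inputs only: ClusterRoleName followed by each subject's Kind and Name.
	h := fnv.New64a()
	_, _ = h.Write([]byte("cluster-admin"))
	_, _ = h.Write([]byte("User" + "alice@example.com"))

	fmt.Printf("%x\n", h.Sum64()) // a short, label-safe hex digest
}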
|
||||
// Each Tenant owner needs the admin Role bound in each Namespace, otherwise no actions on it can be performed.
// Since RBAC is deny-by-default, some specific actions, such as editing Capsule resources, are blocked
// via Dynamic Admission Webhooks instead.
|
||||
// TODO(prometherion): we could create a capsule:admin role rather than hitting webhooks for each action
|
||||
func (r *Manager) ownerRoleBinding(tenant *capsulev1beta1.Tenant) error {
|
||||
// getting RoleBinding label for the mutateFn
|
||||
var subjects []rbacv1.Subject
|
||||
|
||||
tl, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newLabels := map[string]string{tl: tenant.Name}
|
||||
|
||||
for _, owner := range tenant.Spec.Owners {
|
||||
if owner.Kind == "ServiceAccount" {
|
||||
splitName := strings.Split(owner.Name, ":")
|
||||
subjects = append(subjects, rbacv1.Subject{
|
||||
Kind: owner.Kind.String(),
|
||||
Name: splitName[len(splitName)-1],
|
||||
Namespace: splitName[len(splitName)-2],
|
||||
})
|
||||
} else {
|
||||
subjects = append(subjects, rbacv1.Subject{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: owner.Kind.String(),
|
||||
Name: owner.Name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
list := make(map[types.NamespacedName]rbacv1.RoleRef)
|
||||
|
||||
for _, i := range tenant.Status.Namespaces {
|
||||
list[types.NamespacedName{Namespace: i, Name: "namespace:admin"}] = rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: "admin",
|
||||
}
|
||||
list[types.NamespacedName{Namespace: i, Name: "namespace-deleter"}] = rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: rbac.DeleterRoleName,
|
||||
}
|
||||
}
|
||||
|
||||
for namespacedName, roleRef := range list {
|
||||
target := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespacedName.Name,
|
||||
Namespace: namespacedName.Namespace,
|
||||
},
|
||||
}
|
||||
|
||||
var res controllerutil.OperationResult
|
||||
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
|
||||
target.ObjectMeta.Labels = newLabels
|
||||
target.Subjects = subjects
|
||||
target.RoleRef = roleRef
|
||||
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring Capsule RoleBinding %s", target.GetName()), err)
|
||||
|
||||
r.Log.Info("Role Binding sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
controllers/tenant/utils.go (new file, 63 lines)
@@ -0,0 +1,63 @@
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
|
||||
// pruningResources takes care of removing sub-resources that are no longer requested, such as LimitRange, ResourceQuota or
// NetworkPolicy, using the "exists" and "notin" label selector requirements to perform an outer-join removal.
|
||||
func (r *Manager) pruningResources(ns string, keys []string, obj client.Object) (err error) {
|
||||
var capsuleLabel string
|
||||
if capsuleLabel, err = capsulev1beta1.GetTypeLabel(obj); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
selector := labels.NewSelector()
|
||||
|
||||
var exists *labels.Requirement
|
||||
if exists, err = labels.NewRequirement(capsuleLabel, selection.Exists, []string{}); err != nil {
|
||||
return
|
||||
}
|
||||
selector = selector.Add(*exists)
|
||||
|
||||
if len(keys) > 0 {
|
||||
var notIn *labels.Requirement
|
||||
if notIn, err = labels.NewRequirement(capsuleLabel, selection.NotIn, keys); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
selector = selector.Add(*notIn)
|
||||
}
|
||||
|
||||
r.Log.Info("Pruning objects with label selector " + selector.String())
|
||||
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
return r.DeleteAllOf(context.TODO(), obj, &client.DeleteAllOfOptions{
|
||||
ListOptions: client.ListOptions{
|
||||
LabelSelector: selector,
|
||||
Namespace: ns,
|
||||
},
|
||||
DeleteOptions: client.DeleteOptions{},
|
||||
})
|
||||
})
|
||||
}
|
||||
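For illustration, the selector built by pruningResources combines an Exists requirement and a NotIn requirement on the same type label, so DeleteAllOf removes every labelled object whose index key is no longer requested. A standalone sketch; the label key here is assumed, the real one comes from GetTypeLabel:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	// Assumed label key, for illustration purposes only.
	const key = "capsule.clastix.io/limit-range"

	exists, _ := labels.NewRequirement(key, selection.Exists, nil)
	notIn, _ := labels.NewRequirement(key, selection.NotIn, []string{"0", "1"})

	selector := labels.NewSelector().Add(*exists).Add(*notIn)

	// Matches objects carrying the key with an index outside the requested set.
	fmt.Println(selector.String())
}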
|
||||
func (r *Manager) emitEvent(object runtime.Object, namespace string, res controllerutil.OperationResult, msg string, err error) {
|
||||
var eventType = corev1.EventTypeNormal
|
||||
if err != nil {
|
||||
eventType = corev1.EventTypeWarning
|
||||
res = "Error"
|
||||
}
|
||||
|
||||
r.Recorder.AnnotatedEventf(object, map[string]string{"OperationResult": string(res)}, eventType, namespace, msg)
|
||||
}
|
||||
@@ -1,739 +0,0 @@
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"golang.org/x/sync/errgroup"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/retry"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
"github.com/clastix/capsule/controllers/rbac"
|
||||
)
|
||||
|
||||
// TenantReconciler reconciles a Tenant object
|
||||
type TenantReconciler struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Scheme *runtime.Scheme
|
||||
Recorder record.EventRecorder
|
||||
}
|
||||
|
||||
func (r *TenantReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&capsulev1beta1.Tenant{}).
|
||||
Owns(&corev1.Namespace{}).
|
||||
Owns(&networkingv1.NetworkPolicy{}).
|
||||
Owns(&corev1.LimitRange{}).
|
||||
Owns(&corev1.ResourceQuota{}).
|
||||
Owns(&rbacv1.RoleBinding{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r TenantReconciler) Reconcile(ctx context.Context, request ctrl.Request) (result ctrl.Result, err error) {
|
||||
r.Log = r.Log.WithValues("Request.Name", request.Name)
|
||||
|
||||
// Fetch the Tenant instance
|
||||
instance := &capsulev1beta1.Tenant{}
|
||||
err = r.Get(ctx, request.NamespacedName, instance)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
r.Log.Info("Request object not found, could have been deleted after reconcile request")
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
r.Log.Error(err, "Error reading the object")
|
||||
return
|
||||
}
|
||||
// Ensuring the Tenant Status
|
||||
if err = r.updateTenantStatus(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot update Tenant status")
|
||||
return
|
||||
}
|
||||
|
||||
// Ensuring all namespaces are collected
|
||||
r.Log.Info("Ensuring all Namespaces are collected")
|
||||
if err = r.collectNamespaces(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot collect Namespace resources")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Starting processing of Namespaces", "items", len(instance.Status.Namespaces))
|
||||
if err = r.syncNamespaces(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespace items")
|
||||
return
|
||||
}
|
||||
|
||||
if instance.Spec.NetworkPolicies != nil {
|
||||
r.Log.Info("Starting processing of Network Policies", "items", len(instance.Spec.NetworkPolicies.Items))
|
||||
if err = r.syncNetworkPolicies(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync NetworkPolicy items")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if instance.Spec.LimitRanges != nil {
|
||||
r.Log.Info("Starting processing of Limit Ranges", "items", len(instance.Spec.LimitRanges.Items))
|
||||
if err = r.syncLimitRanges(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync LimitRange items")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if instance.Spec.ResourceQuota != nil {
|
||||
r.Log.Info("Starting processing of Resource Quotas", "items", len(instance.Spec.ResourceQuota.Items))
|
||||
if err = r.syncResourceQuotas(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync ResourceQuota items")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring additional RoleBindings for owner")
|
||||
if err = r.syncAdditionalRoleBindings(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync additional RoleBindings items")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring RoleBinding for owner")
|
||||
if err = r.ownerRoleBinding(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync owner RoleBinding")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring Namespace count")
|
||||
if err = r.ensureNamespaceCount(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespace count")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Tenant reconciling completed")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// pruningResources is taking care of removing the no more requested sub-resources as LimitRange, ResourceQuota or
|
||||
// NetworkPolicy using the "exists" and "notin" LabelSelector to perform an outer-join removal.
|
||||
func (r *TenantReconciler) pruningResources(ns string, keys []string, obj client.Object) error {
|
||||
capsuleLabel, err := capsulev1beta1.GetTypeLabel(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := labels.NewSelector()
|
||||
|
||||
exists, err := labels.NewRequirement(capsuleLabel, selection.Exists, []string{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s = s.Add(*exists)
|
||||
|
||||
if len(keys) > 0 {
|
||||
var notIn *labels.Requirement
|
||||
notIn, err = labels.NewRequirement(capsuleLabel, selection.NotIn, keys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s = s.Add(*notIn)
|
||||
}
|
||||
|
||||
r.Log.Info("Pruning objects with label selector " + s.String())
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
return r.DeleteAllOf(context.TODO(), obj, &client.DeleteAllOfOptions{
|
||||
ListOptions: client.ListOptions{
|
||||
LabelSelector: s,
|
||||
Namespace: ns,
|
||||
},
|
||||
DeleteOptions: client.DeleteOptions{},
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// Serial ResourceQuota processing is expensive: goroutines let us process the items concurrently.
// In case of multiple errors, each one is logged and a generic error is returned, since we have to requeue the
// reconciliation loop.
|
||||
func (r *TenantReconciler) resourceQuotasUpdate(resourceName corev1.ResourceName, actual, limit resource.Quantity, list ...corev1.ResourceQuota) error {
|
||||
g := errgroup.Group{}
|
||||
|
||||
for _, item := range list {
|
||||
rq := item
|
||||
g.Go(func() error {
|
||||
found := &corev1.ResourceQuota{}
|
||||
if err := r.Get(context.TODO(), types.NamespacedName{Namespace: rq.Namespace, Name: rq.Name}, found); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
_, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, found, func() error {
|
||||
// Ensuring the annotation map is initialized to avoid a nil-map assignment
// error before recording the overall usage
|
||||
if found.Annotations == nil {
|
||||
found.Annotations = make(map[string]string)
|
||||
}
|
||||
found.Labels = rq.Labels
|
||||
found.Annotations[capsulev1beta1.UsedQuotaFor(resourceName)] = actual.String()
|
||||
found.Annotations[capsulev1beta1.HardQuotaFor(resourceName)] = limit.String()
|
||||
// Updating the Resource according to the actual.Cmp result
|
||||
found.Spec.Hard = rq.Spec.Hard
|
||||
return nil
|
||||
})
|
||||
return err
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
var err error
|
||||
if err = g.Wait(); err != nil {
|
||||
// We had an error and we mark the whole transaction as failed
|
||||
// to process it another time according to the Tenant controller back-off factor.
|
||||
r.Log.Error(err, "Cannot update outer ResourceQuotas", "resourceName", resourceName.String())
|
||||
err = fmt.Errorf("update of outer ResourceQuota items has failed: %s", err.Error())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Additional Role Bindings can be used in many ways: applying Pod Security Policies or giving
|
||||
// access to CRDs or specific API groups.
|
||||
func (r *TenantReconciler) syncAdditionalRoleBindings(tenant *capsulev1beta1.Tenant) (err error) {
|
||||
// hashing the RoleBinding name due to DNS RFC-1123 applied to Kubernetes labels
|
||||
hash := func(binding capsulev1beta1.AdditionalRoleBindingsSpec) string {
|
||||
h := fnv.New64a()
|
||||
|
||||
_, _ = h.Write([]byte(binding.ClusterRoleName))
|
||||
|
||||
for _, sub := range binding.Subjects {
|
||||
_, _ = h.Write([]byte(sub.Kind + sub.Name))
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%x", h.Sum64())
|
||||
}
|
||||
// getting requested Role Binding keys
|
||||
var keys []string
|
||||
for _, i := range tenant.Spec.AdditionalRoleBindings {
|
||||
keys = append(keys, hash(i))
|
||||
}
|
||||
|
||||
var tl, ll string
|
||||
tl, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
ll, err = capsulev1beta1.GetTypeLabel(&rbacv1.RoleBinding{})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
if err = r.pruningResources(ns, keys, &rbacv1.RoleBinding{}); err != nil {
|
||||
return err
|
||||
}
|
||||
for i, roleBinding := range tenant.Spec.AdditionalRoleBindings {
|
||||
lv := hash(roleBinding)
|
||||
rb := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d-%s", tenant.Name, i, roleBinding.ClusterRoleName),
|
||||
Namespace: ns,
|
||||
},
|
||||
}
|
||||
var res controllerutil.OperationResult
|
||||
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, rb, func() error {
|
||||
rb.ObjectMeta.Labels = map[string]string{
|
||||
tl: tenant.Name,
|
||||
ll: lv,
|
||||
}
|
||||
rb.RoleRef = rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: roleBinding.ClusterRoleName,
|
||||
}
|
||||
rb.Subjects = roleBinding.Subjects
|
||||
|
||||
return controllerutil.SetControllerReference(tenant, rb, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, rb.GetNamespace(), res, fmt.Sprintf("Ensuring additional RoleBinding %s", rb.GetName()), err)
|
||||
|
||||
if err != nil {
|
||||
r.Log.Error(err, "Cannot sync Additional RoleBinding")
|
||||
}
|
||||
r.Log.Info(fmt.Sprintf("Additional RoleBindings sync result: %s", string(res)), "name", rb.Name, "namespace", rb.Namespace)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// We're relying on the ResourceQuota resource to represent the quota of the whole Tenant rather than of a single
// Namespace, somewhat abusing this API despite its Namespaced scope.
// Since a single Namespace could take up all the available quota, each Namespace ResourceQuota is a 1:1 mapping of
// the Tenant one: afterwards, Capsule sums all the analogous ResourceQuota resources across the other Tenant
// namespaces to check whether the Tenant quota has been exceeded, reusing the native Kubernetes policy by putting
// the .Status.Used value as the .Hard value.
// This triggers a further reconciliation, but that's fine: the mutateFn reuses the same business logic and, together
// with CreateOrUpdate, skips the update when the resources are identical.
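// As a worked example: with a Tenant hard quota of pods=10 split across three Namespaces using 4, 3 and 3 Pods,
// the summed usage (10) reaches the hard quota, so every namespaced ResourceQuota gets its Hard set to its own
// Used value and further Pod creations are blocked until the overall usage drops below the Tenant limit.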
|
||||
func (r *TenantReconciler) syncResourceQuotas(tenant *capsulev1beta1.Tenant) error {
|
||||
// getting requested ResourceQuota keys
|
||||
keys := make([]string, 0, len(tenant.Spec.ResourceQuota.Items))
|
||||
for i := range tenant.Spec.ResourceQuota.Items {
|
||||
keys = append(keys, strconv.Itoa(i))
|
||||
}
|
||||
|
||||
// getting ResourceQuota labels for the mutateFn
|
||||
tenantLabel, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
typeLabel, err := capsulev1beta1.GetTypeLabel(&corev1.ResourceQuota{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
if err := r.pruningResources(ns, keys, &corev1.ResourceQuota{}); err != nil {
|
||||
return err
|
||||
}
|
||||
for i, q := range tenant.Spec.ResourceQuota.Items {
|
||||
target := &corev1.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d", tenant.Name, i),
|
||||
Namespace: ns,
|
||||
},
|
||||
}
|
||||
res, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
|
||||
target.SetLabels(map[string]string{
|
||||
tenantLabel: tenant.Name,
|
||||
typeLabel: strconv.Itoa(i),
|
||||
})
|
||||
// Requirement to list ResourceQuota of the current Tenant
|
||||
tr, err := labels.NewRequirement(tenantLabel, selection.Equals, []string{tenant.Name})
|
||||
if err != nil {
|
||||
r.Log.Error(err, "Cannot build ResourceQuota Tenant requirement")
|
||||
}
|
||||
// Requirement to list ResourceQuota for the current index
|
||||
ir, err := labels.NewRequirement(typeLabel, selection.Equals, []string{strconv.Itoa(i)})
|
||||
if err != nil {
|
||||
r.Log.Error(err, "Cannot build ResourceQuota index requirement")
|
||||
}
|
||||
|
||||
// Listing all the ResourceQuota matching the said requirements.
// These are required since Capsule sums all the used quota across
// the Tenant Namespaces to compute the Tenant-wide one.
|
||||
rql := &corev1.ResourceQuotaList{}
|
||||
err = r.List(context.TODO(), rql, &client.ListOptions{
|
||||
LabelSelector: labels.NewSelector().Add(*tr).Add(*ir),
|
||||
})
|
||||
if err != nil {
|
||||
r.Log.Error(err, "Cannot list ResourceQuota", "tenantFilter", tr.String(), "indexFilter", ir.String())
|
||||
return err
|
||||
}
|
||||
|
||||
// Iterating over all the resources declared in the ResourceQuota spec,
// summing the used quota across the different Namespaces to determine
// whether we're hitting a Hard quota at the Tenant level.
// In that case, we block further allocations by setting the Hard
// quota to the currently used one.
|
||||
for rn, rq := range q.Hard {
|
||||
r.Log.Info("Desired hard " + rn.String() + " quota is " + rq.String())
|
||||
|
||||
// Getting the whole usage across all the Tenant Namespaces
|
||||
var qt resource.Quantity
|
||||
for _, rq := range rql.Items {
|
||||
qt.Add(rq.Status.Used[rn])
|
||||
}
|
||||
r.Log.Info("Computed " + rn.String() + " quota for the whole Tenant is " + qt.String())
|
||||
|
||||
switch qt.Cmp(q.Hard[rn]) {
|
||||
case 0:
|
||||
// The Tenant is matching exactly the Quota:
|
||||
// falling through next case since we have to block further
|
||||
// resource allocations.
|
||||
fallthrough
|
||||
case 1:
|
||||
// The Tenant is OverQuota:
|
||||
// updating all the related ResourceQuota with the current
|
||||
// used Quota to block further creations.
|
||||
for i := range rql.Items {
|
||||
if _, ok := rql.Items[i].Status.Used[rn]; ok {
|
||||
rql.Items[i].Spec.Hard[rn] = rql.Items[i].Status.Used[rn]
|
||||
} else {
|
||||
um := make(map[corev1.ResourceName]resource.Quantity)
|
||||
um[rn] = resource.Quantity{}
|
||||
rql.Items[i].Spec.Hard = um
|
||||
}
|
||||
}
|
||||
default:
|
||||
// The Tenant is respecting the Hard quota:
|
||||
// restoring the default one for all the elements,
|
||||
// also for the reconciled one.
|
||||
for i := range rql.Items {
|
||||
if rql.Items[i].Spec.Hard == nil {
|
||||
rql.Items[i].Spec.Hard = map[corev1.ResourceName]resource.Quantity{}
|
||||
}
|
||||
rql.Items[i].Spec.Hard[rn] = q.Hard[rn]
|
||||
}
|
||||
target.Spec = q
|
||||
}
|
||||
if err := r.resourceQuotasUpdate(rn, qt, q.Hard[rn], rql.Items...); err != nil {
|
||||
r.Log.Error(err, "cannot proceed with outer ResourceQuota")
|
||||
return err
|
||||
}
|
||||
}
|
||||
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring ResourceQuota %s", target.GetName()), err)
|
||||
|
||||
r.Log.Info("Resource Quota sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensuring all the LimitRange are applied to each Namespace handled by the Tenant.
|
||||
func (r *TenantReconciler) syncLimitRanges(tenant *capsulev1beta1.Tenant) error {
|
||||
// getting requested LimitRange keys
|
||||
keys := make([]string, 0, len(tenant.Spec.LimitRanges.Items))
|
||||
for i := range tenant.Spec.LimitRanges.Items {
|
||||
keys = append(keys, strconv.Itoa(i))
|
||||
}
|
||||
|
||||
// getting LimitRange labels for the mutateFn
|
||||
tl, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ll, err := capsulev1beta1.GetTypeLabel(&corev1.LimitRange{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
if err := r.pruningResources(ns, keys, &corev1.LimitRange{}); err != nil {
|
||||
return err
|
||||
}
|
||||
for i, spec := range tenant.Spec.LimitRanges.Items {
|
||||
t := &corev1.LimitRange{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d", tenant.Name, i),
|
||||
Namespace: ns,
|
||||
},
|
||||
}
|
||||
res, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, t, func() (err error) {
|
||||
t.ObjectMeta.Labels = map[string]string{
|
||||
tl: tenant.Name,
|
||||
ll: strconv.Itoa(i),
|
||||
}
|
||||
t.Spec = spec
|
||||
return controllerutil.SetControllerReference(tenant, t, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, t.GetNamespace(), res, fmt.Sprintf("Ensuring LimitRange %s", t.GetName()), err)
|
||||
|
||||
r.Log.Info("LimitRange sync result: "+string(res), "name", t.Name, "namespace", t.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *TenantReconciler) syncNamespaceMetadata(namespace string, tnt *capsulev1beta1.Tenant) (err error) {
|
||||
var res controllerutil.OperationResult
|
||||
|
||||
err = retry.RetryOnConflict(retry.DefaultBackoff, func() (conflictErr error) {
|
||||
ns := &corev1.Namespace{}
|
||||
if conflictErr = r.Client.Get(context.TODO(), types.NamespacedName{Name: namespace}, ns); conflictErr != nil {
|
||||
return
|
||||
}
|
||||
|
||||
res, conflictErr = controllerutil.CreateOrUpdate(context.TODO(), r.Client, ns, func() error {
|
||||
a := make(map[string]string)
|
||||
|
||||
if tnt.Spec.NamespacesMetadata != nil {
|
||||
for k, v := range tnt.Spec.NamespacesMetadata.AdditionalAnnotations {
|
||||
a[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if tnt.Spec.NodeSelector != nil {
|
||||
var selector []string
|
||||
for k, v := range tnt.Spec.NodeSelector {
|
||||
selector = append(selector, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
a["scheduler.alpha.kubernetes.io/node-selector"] = strings.Join(selector, ",")
|
||||
}
|
||||
|
||||
if tnt.Spec.IngressClasses != nil {
|
||||
if len(tnt.Spec.IngressClasses.Exact) > 0 {
|
||||
a[capsulev1beta1.AvailableIngressClassesAnnotation] = strings.Join(tnt.Spec.IngressClasses.Exact, ",")
|
||||
}
|
||||
if len(tnt.Spec.IngressClasses.Regex) > 0 {
|
||||
a[capsulev1beta1.AvailableIngressClassesRegexpAnnotation] = tnt.Spec.IngressClasses.Regex
|
||||
}
|
||||
}
|
||||
|
||||
if tnt.Spec.StorageClasses != nil {
|
||||
if len(tnt.Spec.StorageClasses.Exact) > 0 {
|
||||
a[capsulev1beta1.AvailableStorageClassesAnnotation] = strings.Join(tnt.Spec.StorageClasses.Exact, ",")
|
||||
}
|
||||
if len(tnt.Spec.StorageClasses.Regex) > 0 {
|
||||
a[capsulev1beta1.AvailableStorageClassesRegexpAnnotation] = tnt.Spec.StorageClasses.Regex
|
||||
}
|
||||
}
|
||||
|
||||
if tnt.Spec.ContainerRegistries != nil {
|
||||
if len(tnt.Spec.ContainerRegistries.Exact) > 0 {
|
||||
a[capsulev1beta1.AllowedRegistriesAnnotation] = strings.Join(tnt.Spec.ContainerRegistries.Exact, ",")
|
||||
}
|
||||
if len(tnt.Spec.ContainerRegistries.Regex) > 0 {
|
||||
a[capsulev1beta1.AllowedRegistriesRegexpAnnotation] = tnt.Spec.ContainerRegistries.Regex
|
||||
}
|
||||
}
|
||||
|
||||
ns.SetAnnotations(a)
|
||||
|
||||
l := make(map[string]string)
|
||||
|
||||
if tnt.Spec.NamespacesMetadata != nil {
|
||||
for k, v := range tnt.Spec.NamespacesMetadata.AdditionalLabels {
|
||||
l[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
l["name"] = namespace
|
||||
capsuleLabel, _ := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
l[capsuleLabel] = tnt.GetName()
|
||||
ns.SetLabels(l)
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return
|
||||
})
|
||||
|
||||
r.emitEvent(tnt, namespace, res, "Ensuring Namespace metadata", err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Ensuring the Tenant metadata (labels and annotations) is applied to each Namespace handled by the Tenant.
|
||||
func (r *TenantReconciler) syncNamespaces(tenant *capsulev1beta1.Tenant) (err error) {
|
||||
group := errgroup.Group{}
|
||||
|
||||
for _, item := range tenant.Status.Namespaces {
|
||||
namespace := item
|
||||
group.Go(func() error {
|
||||
return r.syncNamespaceMetadata(namespace, tenant)
|
||||
})
|
||||
}
|
||||
|
||||
if err = group.Wait(); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespaces")
|
||||
err = fmt.Errorf("cannot sync Namespaces: %s", err.Error())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Ensuring all the NetworkPolicies are applied to each Namespace handled by the Tenant.
|
||||
func (r *TenantReconciler) syncNetworkPolicies(tenant *capsulev1beta1.Tenant) error {
|
||||
// getting requested NetworkPolicy keys
|
||||
keys := make([]string, 0, len(tenant.Spec.NetworkPolicies.Items))
|
||||
for i := range tenant.Spec.NetworkPolicies.Items {
|
||||
keys = append(keys, strconv.Itoa(i))
|
||||
}
|
||||
|
||||
// getting NetworkPolicy labels for the mutateFn
|
||||
tl, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nl, err := capsulev1beta1.GetTypeLabel(&networkingv1.NetworkPolicy{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
if err := r.pruningResources(ns, keys, &networkingv1.NetworkPolicy{}); err != nil {
|
||||
return err
|
||||
}
|
||||
for i, spec := range tenant.Spec.NetworkPolicies.Items {
|
||||
t := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d", tenant.Name, i),
|
||||
Namespace: ns,
|
||||
},
|
||||
}
|
||||
res, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, t, func() (err error) {
|
||||
t.SetLabels(map[string]string{
|
||||
tl: tenant.Name,
|
||||
nl: strconv.Itoa(i),
|
||||
})
|
||||
t.Spec = spec
|
||||
|
||||
return controllerutil.SetControllerReference(tenant, t, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, t.GetNamespace(), res, fmt.Sprintf("Ensuring NetworkPolicy %s", t.GetName()), err)
|
||||
|
||||
r.Log.Info("Network Policy sync result: "+string(res), "name", t.Name, "namespace", t.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Each Tenant owner needs the admin ClusterRole bound in each Namespace, otherwise no actions can be performed on it.
// Since RBAC is deny-by-default and we grant the broad admin role, specific restrictions such as editing Capsule
// resources are enforced via Dynamic Admission Webhooks instead.
// TODO(prometherion): we could create a capsule:admin role rather than hitting webhooks for each action
|
||||
func (r *TenantReconciler) ownerRoleBinding(tenant *capsulev1beta1.Tenant) error {
|
||||
// getting RoleBinding label for the mutateFn
|
||||
var subjects []rbacv1.Subject
|
||||
|
||||
tl, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l := map[string]string{tl: tenant.Name}
|
||||
|
||||
for _, owner := range tenant.Spec.Owners {
|
||||
if owner.Kind == "ServiceAccount" {
|
||||
splitName := strings.Split(owner.Name, ":")
|
||||
subjects = append(subjects, rbacv1.Subject{
|
||||
Kind: owner.Kind.String(),
|
||||
Name: splitName[len(splitName)-1],
|
||||
Namespace: splitName[len(splitName)-2],
|
||||
})
|
||||
} else {
|
||||
subjects = append(subjects, rbacv1.Subject{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: owner.Kind.String(),
|
||||
Name: owner.Name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
rbl := make(map[types.NamespacedName]rbacv1.RoleRef)
|
||||
for _, i := range tenant.Status.Namespaces {
|
||||
rbl[types.NamespacedName{Namespace: i, Name: "namespace:admin"}] = rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: "admin",
|
||||
}
|
||||
rbl[types.NamespacedName{Namespace: i, Name: "namespace-deleter"}] = rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: rbac.DeleterRoleName,
|
||||
}
|
||||
}
|
||||
|
||||
for nn, rr := range rbl {
|
||||
target := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nn.Name,
|
||||
Namespace: nn.Namespace,
|
||||
},
|
||||
}
|
||||
|
||||
var res controllerutil.OperationResult
|
||||
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
|
||||
target.ObjectMeta.Labels = l
|
||||
target.Subjects = subjects
|
||||
target.RoleRef = rr
|
||||
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring Capsule RoleBinding %s", target.GetName()), err)
|
||||
|
||||
r.Log.Info("Role Binding sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
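// ensureNamespaceCount aligns the Tenant status with the number of Namespaces it currently owns,
// retrying on conflict to survive concurrent status updates.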
func (r *TenantReconciler) ensureNamespaceCount(tenant *capsulev1beta1.Tenant) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
tenant.Status.Size = uint(len(tenant.Status.Namespaces))
|
||||
found := &capsulev1beta1.Tenant{}
|
||||
if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: tenant.GetName()}, found); err != nil {
|
||||
return err
|
||||
}
|
||||
found.Status.Size = tenant.Status.Size
|
||||
return r.Client.Status().Update(context.TODO(), found, &client.UpdateOptions{})
|
||||
})
|
||||
}
|
||||
|
||||
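// emitEvent records an Event for the given object, annotated with the CreateOrUpdate operation result;
// the Event is downgraded to a Warning when an error occurred.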
func (r *TenantReconciler) emitEvent(object runtime.Object, namespace string, res controllerutil.OperationResult, msg string, err error) {
|
||||
var eventType = corev1.EventTypeNormal
|
||||
if err != nil {
|
||||
eventType = corev1.EventTypeWarning
|
||||
res = "Error"
|
||||
}
|
||||
|
||||
r.Recorder.AnnotatedEventf(object, map[string]string{"OperationResult": string(res)}, eventType, namespace, msg)
|
||||
}
|
||||
|
||||
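// collectNamespaces lists all the Namespaces owned by the Tenant, relying on the ownerReferences field index,
// and stores them in the Tenant status.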
func (r *TenantReconciler) collectNamespaces(tenant *capsulev1beta1.Tenant) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
|
||||
nl := &corev1.NamespaceList{}
|
||||
err = r.Client.List(context.TODO(), nl, client.MatchingFieldsSelector{
|
||||
Selector: fields.OneTermEqualSelector(".metadata.ownerReferences[*].capsule", tenant.GetName()),
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, tenant.DeepCopy(), func() error {
|
||||
tenant.AssignNamespaces(nl.Items)
|
||||
return r.Client.Status().Update(context.TODO(), tenant, &client.UpdateOptions{})
|
||||
})
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
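// updateTenantStatus aligns the Tenant state in the status subresource, marking it as Cordoned or Active.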
func (r *TenantReconciler) updateTenantStatus(tnt *capsulev1beta1.Tenant) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
|
||||
if tnt.IsCordoned() {
|
||||
tnt.Status.State = capsulev1beta1.TenantStateCordoned
|
||||
} else {
|
||||
tnt.Status.State = capsulev1beta1.TenantStateActive
|
||||
}
|
||||
|
||||
return r.Client.Status().Update(context.Background(), tnt)
|
||||
})
|
||||
}
|
||||
8
docs/.gitignore
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
*.log
|
||||
.cache
|
||||
.DS_Store
|
||||
src/.temp
|
||||
node_modules
|
||||
dist
|
||||
.env
|
||||
.env.*
|
||||
12
docs/README.md
Normal file
@@ -0,0 +1,12 @@
|
||||
# Capsule Documentation
|
||||
|
||||
1. Ensure you have [`yarn`](https://classic.yarnpkg.com/lang/en/docs/install/#debian-stable) installed in your path.
|
||||
2. `yarn install`
|
||||
|
||||
## Local development
|
||||
|
||||
```shell
|
||||
yarn develop
|
||||
```
|
||||
|
||||
This will create a local webserver listening on `localhost:8080` with hot-reload of your local changes.
|
||||
|
BIN docs/content/assets/proxy-kubernetes-dashboard.png (new file, 294 KiB)
BIN docs/content/assets/proxy-lens.png (new file, 283 KiB)
BIN docs/content/contributing/assets/dev-env.png (new file, 111 KiB)
341
docs/content/contributing/development.md
Normal file
@@ -0,0 +1,341 @@
|
||||
# Capsule Development
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Make sure you have these tools installed:
|
||||
|
||||
- [Go 1.16+](https://golang.org/dl/)
|
||||
- [Operator SDK 1.7.2+](https://github.com/operator-framework/operator-sdk), or [Kubebuilder](https://github.com/kubernetes-sigs/kubebuilder)
|
||||
- [KinD](https://github.com/kubernetes-sigs/kind) or [k3d](https://k3d.io/), with `kubectl`
|
||||
- [ngrok](https://ngrok.com/) (if you want to run locally with remote Kubernetes)
|
||||
- [golangci-lint](https://github.com/golangci/golangci-lint)
|
||||
- OpenSSL
|
||||
|
||||
## Setup a Kubernetes Cluster
|
||||
|
||||
A lightweight Kubernetes cluster running on your laptop can be very handy for Kubernetes-native development like Capsule.
|
||||
|
||||
### By `k3d`
|
||||
|
||||
```shell
|
||||
# Install the k3d CLI with brew on Mac, or your preferred way
|
||||
$ brew install k3d
|
||||
|
||||
# Export your laptop's IP, e.g. retrieving it by: ifconfig
|
||||
# Do change this IP to yours
|
||||
$ export LAPTOP_HOST_IP=192.168.10.101
|
||||
|
||||
# Spin up a bare minimum cluster
|
||||
# Refer to here for more options: https://k3d.io/v4.4.8/usage/commands/k3d_cluster_create/
|
||||
$ k3d cluster create k3s-capsule --servers 1 --agents 1 --no-lb --k3s-server-arg --tls-san=${LAPTOP_HOST_IP}
|
||||
|
||||
# Get Kubeconfig
|
||||
$ k3d kubeconfig get k3s-capsule > /tmp/k3s-capsule && export KUBECONFIG="/tmp/k3s-capsule"
|
||||
|
||||
# This will create a cluster with 1 server and 1 worker node
|
||||
$ kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
k3d-k3s-capsule-server-0 Ready control-plane,master 2m13s v1.21.2+k3s1
|
||||
k3d-k3s-capsule-agent-0 Ready <none> 2m3s v1.21.2+k3s1
|
||||
|
||||
# Or 2 Docker containers if you view it from Docker perspective
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
5c26ad840c62 rancher/k3s:v1.21.2-k3s1 "/bin/k3s agent" 53 seconds ago Up 45 seconds k3d-k3s-capsule-agent-0
|
||||
753998879b28 rancher/k3s:v1.21.2-k3s1 "/bin/k3s server --t…" 53 seconds ago Up 51 seconds 0.0.0.0:49708->6443/tcp k3d-k3s-capsule-server-0
|
||||
```
|
||||
|
||||
### By `kind`
|
||||
|
||||
```shell
|
||||
# Install the kind CLI with brew on Mac, or your preferred way
|
||||
$ brew install kind
|
||||
|
||||
# Prepare a kind config file with necessary customization
|
||||
$ cat > kind.yaml <<EOF
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
networking:
|
||||
apiServerAddress: "0.0.0.0"
|
||||
nodes:
|
||||
- role: control-plane
|
||||
kubeadmConfigPatches:
|
||||
- |
|
||||
kind: ClusterConfiguration
|
||||
metadata:
|
||||
name: config
|
||||
apiServer:
|
||||
certSANs:
|
||||
- localhost
|
||||
- 127.0.0.1
|
||||
- kubernetes
|
||||
- kubernetes.default.svc
|
||||
- kubernetes.default.svc.cluster.local
|
||||
- kind
|
||||
- 0.0.0.0
|
||||
- ${LAPTOP_HOST_IP}
|
||||
- role: worker
|
||||
EOF
|
||||
|
||||
# Spin up a bare minimum cluster with 1 master 1 worker node
|
||||
$ kind create cluster --name kind-capsule --config kind.yaml
|
||||
|
||||
# This will create a cluster with 1 server and 1 worker node
|
||||
$ kubectl get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
kind-capsule-control-plane Ready control-plane,master 84s v1.21.1
|
||||
kind-capsule-worker Ready <none> 56s v1.21.1
|
||||
|
||||
# Or 2 Docker containers if you view it from Docker perspective
|
||||
$ docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
7b329fd3a838 kindest/node:v1.21.1 "/usr/local/bin/entr…" About a minute ago Up About a minute 0.0.0.0:54894->6443/tcp kind-capsule-control-plane
|
||||
7d50f1633555 kindest/node:v1.21.1 "/usr/local/bin/entr…" About a minute ago Up About a minute kind-capsule-worker
|
||||
```
|
||||
|
||||
## Fork, build, and deploy Capsule
|
||||
|
||||
The `fork-clone-contribute-pr` flow is common for contributing to OSS projects like Kubernetes and Capsule.
|
||||
|
||||
Let's assume you've forked it into your GitHub namespace, say `myuser`, and then you can clone it with Git protocol.
|
||||
Do remember to change the `myuser` to yours.
|
||||
|
||||
```shell
|
||||
$ git clone git@github.com:myuser/capsule.git && cd capsule
|
||||
```
|
||||
|
||||
It's a good practice to add the upstream as the remote too so we can easily fetch and merge the upstream to our fork:
|
||||
|
||||
```shell
|
||||
$ git remote add upstream https://github.com/clastix/capsule.git
|
||||
$ git remote -vv
|
||||
origin git@github.com:myuser/capsule.git (fetch)
|
||||
origin git@github.com:myuser/capsule.git (push)
|
||||
upstream https://github.com/clastix/capsule.git (fetch)
|
||||
upstream https://github.com/clastix/capsule.git (push)
|
||||
```
|
||||
|
||||
Build and deploy:
|
||||
|
||||
```shell
|
||||
# Download the project dependencies
|
||||
$ go mod download
|
||||
|
||||
# Build the Capsule image
|
||||
$ make docker-build
|
||||
|
||||
# Retrieve the built image version
|
||||
$ export CAPSULE_IMAGE_VERSION=`docker images --format '{{.Tag}}' quay.io/clastix/capsule`
|
||||
|
||||
# If k3d, load the image into the cluster by
|
||||
$ k3d image import --cluster k3s-capsule capsule quay.io/clastix/capsule:${CAPSULE_IMAGE_VERSION}
|
||||
# If Kind, load the image into cluster by
|
||||
$ kind load docker-image --name kind-capsule quay.io/clastix/capsule:${CAPSULE_IMAGE_VERSION}
|
||||
|
||||
# deploy all the required manifests
|
||||
# Note: 1) please retry if you saw errors; 2) if you want to clean it up first, run: make remove
|
||||
$ make deploy
|
||||
|
||||
# Make sure the controller is running
|
||||
$ kubectl get pod -n capsule-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
capsule-controller-manager-5c6b8445cf-566dc 1/1 Running 0 23s
|
||||
|
||||
# Check the logs if needed
|
||||
$ kubectl -n capsule-system logs --all-containers -l control-plane=controller-manager
|
||||
|
||||
# You may have a try to deploy a Tenant too to make sure it works end to end
|
||||
$ kubectl apply -f - <<EOF
|
||||
apiVersion: capsule.clastix.io/v1beta1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owners:
|
||||
- name: alice
|
||||
kind: User
|
||||
- name: system:serviceaccount:capsule-system:default
|
||||
kind: ServiceAccount
|
||||
EOF
|
||||
|
||||
# There shouldn't be any errors and you should see the newly created tenant
|
||||
$ kubectl get tenants
|
||||
NAME STATE NAMESPACE QUOTA NAMESPACE COUNT NODE SELECTOR AGE
|
||||
oil Active 0 14s
|
||||
```
|
||||
|
||||
If you want to test namespace creation or such stuff, make sure to use impersonation:
|
||||
|
||||
```sh
|
||||
$ kubectl ... --as system:serviceaccount:capsule-system:default --as-group capsule.clastix.io
|
||||
```
|
||||
|
||||
As of now, a complete Capsule environment has been set up in a `kind`- or `k3d`-powered cluster, and the `capsule-controller-manager` is running as a deployment serving as:
|
||||
|
||||
- The reconcilers for CRDs, and
|
||||
- A series of webhooks
|
||||
|
||||
## Setup the development environment
|
||||
|
||||
During development, we prefer that the code is running within our IDE locally, instead of running as the normal Pod(s) within the Kubernetes cluster.
|
||||
|
||||
Such a setup can be illustrated by the diagram below:
|
||||
|
||||

|
||||
|
||||
To achieve that, there are some necessary steps to walk through, which have been wrapped into a `make` target within our `Makefile`.
|
||||
|
||||
So the TL;DR answer is:
|
||||
|
||||
```shell
|
||||
# If you haven't installed or run `make deploy` before, do it first
|
||||
# Note: please retry if you saw errors
|
||||
$ make deploy
|
||||
|
||||
# To retrieve your laptop's IP and execute `make dev-setup` to setup dev env
|
||||
# For example: LAPTOP_HOST_IP=192.168.10.101 make dev-setup
|
||||
$ LAPTOP_HOST_IP="<YOUR_LAPTOP_IP>" make dev-setup
|
||||
```
|
||||
|
||||
This is a very common setup for typical Kubernetes Operator development, so let's walk through the steps in more detail.
|
||||
|
||||
1. Scaling down the deployed Pod(s) to 0
|
||||
|
||||
We need to scale the existing replicas of `capsule-controller-manager` to 0 to avoid reconciliation competition between the Pod(s) and the code running outside of the cluster, in our preferred IDE for example.
|
||||
|
||||
```shell
|
||||
$ kubectl -n capsule-system scale deployment capsule-controller-manager --replicas=0
|
||||
deployment.apps/capsule-controller-manager scaled
|
||||
```
|
||||
|
||||
2. Preparing TLS certificate for the webhooks
|
||||
|
||||
Running webhooks requires TLS, so we prepare a TLS key pair in our development environment to handle HTTPS requests.
|
||||
|
||||
```shell
|
||||
# Prepare a simple OpenSSL config file
|
||||
# Do remember to export LAPTOP_HOST_IP before running this command
|
||||
$ cat > _tls.cnf <<EOF
|
||||
[ req ]
|
||||
default_bits = 4096
|
||||
distinguished_name = req_distinguished_name
|
||||
req_extensions = req_ext
|
||||
[ req_distinguished_name ]
|
||||
countryName = SG
|
||||
stateOrProvinceName = SG
|
||||
localityName = SG
|
||||
organizationName = CAPSULE
|
||||
commonName = CAPSULE
|
||||
[ req_ext ]
|
||||
subjectAltName = @alt_names
|
||||
[alt_names]
|
||||
IP.1 = ${LAPTOP_HOST_IP}
|
||||
EOF
|
||||
|
||||
# Create this dir to mimic the Pod mount point
|
||||
$ mkdir -p /tmp/k8s-webhook-server/serving-certs
|
||||
|
||||
# Generate the TLS cert/key under /tmp/k8s-webhook-server/serving-certs
|
||||
$ openssl req -newkey rsa:4096 -days 3650 -nodes -x509 \
|
||||
-subj "/C=SG/ST=SG/L=SG/O=CAPSULE/CN=CAPSULE" \
|
||||
-extensions req_ext \
|
||||
-config _tls.cnf \
|
||||
-keyout /tmp/k8s-webhook-server/serving-certs/tls.key \
|
||||
-out /tmp/k8s-webhook-server/serving-certs/tls.crt
|
||||
|
||||
# Clean it up
|
||||
$ rm -f _tls.cnf
|
||||
```
|
||||
|
||||
3. Patching the Webhooks
|
||||
|
||||
By default, the webhooks are registered against the Services, which route to the Pods inside the cluster.
|
||||
|
||||
We need to _delegate_ the controllers' and webhooks' services to the code running in our IDE by patching the `MutatingWebhookConfiguration` and `ValidatingWebhookConfiguration`.
|
||||
|
||||
```shell
|
||||
# Export your laptop's IP with the 9443 port exposed by controllers/webhooks' services
|
||||
$ export WEBHOOK_URL="https://${LAPTOP_HOST_IP}:9443"
|
||||
|
||||
# Export the cert we just generated as the CA bundle for webhook TLS
|
||||
$ export CA_BUNDLE=`openssl base64 -in /tmp/k8s-webhook-server/serving-certs/tls.crt | tr -d '\n'`
|
||||
|
||||
# Patch the MutatingWebhookConfiguration webhook
|
||||
$ kubectl patch MutatingWebhookConfiguration capsule-mutating-webhook-configuration \
|
||||
--type='json' -p="[\
|
||||
{'op': 'replace', 'path': '/webhooks/0/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/mutate-v1-namespace-owner-reference\",'caBundle':\"${CA_BUNDLE}\"}}\
|
||||
]"
|
||||
|
||||
# Verify it if you want
|
||||
$ kubectl get MutatingWebhookConfiguration capsule-mutating-webhook-configuration -o yaml
|
||||
|
||||
# Patch the ValidatingWebhookConfiguration webhooks
|
||||
# Note: there is a list of validating webhook endpoints, not just one
|
||||
$ kubectl patch ValidatingWebhookConfiguration capsule-validating-webhook-configuration \
|
||||
--type='json' -p="[\
|
||||
{'op': 'replace', 'path': '/webhooks/0/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/cordoning\",'caBundle':\"${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/1/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/ingresses\",'caBundle':\"${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/2/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/namespaces\",'caBundle':\"${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/3/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/networkpolicies\",'caBundle':\"${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/4/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/pods\",'caBundle':\"${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/5/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/persistentvolumeclaims\",'caBundle':\"${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/6/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/services\",'caBundle':\"${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/7/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/tenants\",'caBundle':\"${CA_BUNDLE}\"}},\
|
||||
{'op': 'replace', 'path': '/webhooks/8/clientConfig', 'value':{'url':\"${WEBHOOK_URL}/nodes\",'caBundle':\"${CA_BUNDLE}\"}}\
|
||||
]"
|
||||
|
||||
# Verify it if you want
|
||||
$ kubectl get ValidatingWebhookConfiguration capsule-validating-webhook-configuration -o yaml
|
||||
```
|
||||
|
||||
## Run Capsule outside the cluster
|
||||
|
||||
Now we can run Capsule controllers with webhooks outside of the Kubernetes cluster:
|
||||
|
||||
```shell
|
||||
$ export NAMESPACE=capsule-system && export TMPDIR=/tmp/
|
||||
$ go run .
|
||||
```
|
||||
|
||||
To verify that, we can open a new console and create a new Tenant:
|
||||
|
||||
```shell
|
||||
$ kubectl apply -f - <<EOF
|
||||
apiVersion: capsule.clastix.io/v1beta1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: gas
|
||||
spec:
|
||||
owners:
|
||||
- name: alice
|
||||
kind: User
|
||||
EOF
|
||||
```
|
||||
|
||||
We should see output and logs in the console where Capsule is running.
|
||||
|
||||
Now it's time to work through our familiar inner loop for development in our preferred IDE. For example, if you're using [Visual Studio Code](https://code.visualstudio.com), this `launch.json` file can be a good start.
|
||||
|
||||
```json
|
||||
{
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Launch",
|
||||
"type": "go",
|
||||
"request": "launch",
|
||||
"mode": "auto",
|
||||
"program": "${workspaceFolder}",
|
||||
"args": [
|
||||
"--zap-encoder=console",
|
||||
"--zap-log-level=debug",
|
||||
"--configuration-name=capsule-default"
|
||||
],
|
||||
"env": {
|
||||
"NAMESPACE": "capsule-system",
|
||||
"TMPDIR": "/tmp/"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
22
docs/content/contributing/governance.md
Normal file
@@ -0,0 +1,22 @@
|
||||
# Project Governance
|
||||
|
||||
This document lays out the guidelines under which the Capsule project will be governed.
|
||||
The goal is to make sure that the roles and responsibilities are well defined and clarify how decisions are made.
|
||||
|
||||
## Roles
|
||||
|
||||
In the context of Capsule project, we consider the following roles:
|
||||
|
||||
* __Users__: everyone using Capsule, typically willing to provide feedback by proposing features and/or filing issues.
|
||||
|
||||
* __Contributors__: everyone contributing code, documentation, examples, tests, and participating in feature proposals as well as design discussions.
|
||||
|
||||
* __Maintainers__: responsible for engaging with and assisting contributors to iterate on their contributions until they reach acceptable quality. Maintainers can decide whether the contributions can be accepted into the project or rejected.
|
||||
|
||||
## Release Management
|
||||
|
||||
The release process will be governed by Maintainers.
|
||||
|
||||
## Roadmap Planning
|
||||
|
||||
Maintainers will share roadmap and release versions as milestones in GitHub.
|
||||
111
docs/content/contributing/guidelines.md
Normal file
@@ -0,0 +1,111 @@
|
||||
# Contributing Guidelines
|
||||
|
||||
Thank you for your interest in contributing to Capsule. Whether it's a bug report, new feature, correction, or additional documentation, we greatly value feedback and contributions from our community.
|
||||
|
||||
Please read through this document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your bug report or contribution.
|
||||
|
||||
## Pull Requests
|
||||
|
||||
Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
|
||||
|
||||
1. You are working against the latest source on the *master* branch.
|
||||
1. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
|
||||
1. You open an issue to discuss any significant work: we would hate for your time to be wasted.
|
||||
|
||||
To send us a pull request, please:
|
||||
|
||||
1. Fork the repository.
|
||||
1. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it
|
||||
will be hard for us to focus on your change.
|
||||
1. Ensure local tests pass.
|
||||
1. Commit to your fork using clear commit messages.
|
||||
1. Send us a pull request, answering any default questions in the pull request interface.
|
||||
1. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
|
||||
|
||||
GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
|
||||
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
|
||||
|
||||
Make sure to keep Pull Requests small and functional to make them easier to review, understand, and look up in commit history. This repository uses "Squash and Commit" to keep our history clean and make it easier to revert changes based on PR.
|
||||
|
||||
Adding the appropriate documentation, unit tests and e2e tests as part of a feature is the responsibility of the
|
||||
feature owner, whether it is done in the same Pull Request or not.
|
||||
|
||||
All the Pull Requests must refer to an already open issue: this is the first phase to contribute also for informing maintainers about the issue.
|
||||
|
||||
## Commits
|
||||
|
||||
The commit's first line should not exceed 50 columns.
|
||||
|
||||
A commit description is welcome to explain the changes further: just ensure
to put a blank line, then an arbitrary number of lines of at most 72 characters,
with at most one blank line between them.
|
||||
|
||||
Please split changes into several small, documented commits: this will help us perform a better review. Commits must follow the Conventional Commits Specification, a lightweight convention on top of commit messages. It provides an easy set of rules for creating an explicit commit history, which makes it easier to write automated tools on top of it. This convention dovetails with Semantic Versioning by describing the features, fixes, and breaking changes made in commit messages. See the [Conventional Commits Specification](https://www.conventionalcommits.org) to learn more.
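As a purely illustrative sketch (the scope, description, and body below are made up), a Conventional Commit could look like:

```shell
$ git commit -m "fix(tenant): requeue reconciliation on ResourceQuota conflicts" \
    -m "Retry the outer ResourceQuota update on conflict errors instead of failing the whole loop."
```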
|
||||
|
||||
> In case of errors or need of changes to previous commits,
|
||||
> fix them squashing to make changes atomic.
|
||||
|
||||
## Code convention
|
||||
|
||||
Capsule is written in Golang. The changes must follow the Pull Request method where a _GitHub Action_ will
|
||||
check the `golangci-lint`, so ensure your changes respect the coding standard.
|
||||
|
||||
### golint
|
||||
|
||||
You can easily check them issuing the _Make_ recipe `golint`.
|
||||
|
||||
```
|
||||
# make golint
|
||||
golangci-lint run -c .golangci.yml
|
||||
```
|
||||
|
||||
> Enabled linters and related options are defined in the [.golangci.yml file](https://github.com/clastix/capsule/blob/master/.golangci.yml)
|
||||
|
||||
### goimports
|
||||
|
||||
Also, the Go import statements must be sorted following the best practice:
|
||||
|
||||
```
|
||||
<STANDARD LIBRARY>
|
||||
|
||||
<EXTERNAL PACKAGES>
|
||||
|
||||
<LOCAL PACKAGES>
|
||||
```
|
||||
|
||||
To help you out you can use the _Make_ recipe `goimports`
|
||||
|
||||
```
|
||||
# make goimports
|
||||
goimports -w -l -local "github.com/clastix/capsule" .
|
||||
```
|
||||
|
||||
## Finding contributions to work on
|
||||
Looking at the existing issues is a great way to find something to contribute to. As our projects use the default
GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted'
and 'good first issue' issues is a great place to start.
|
||||
|
||||
## Design Docs
|
||||
|
||||
A contributor proposes a design with a PR on the repository to allow for revisions and discussions.
|
||||
If a design needs to be discussed before formulating a document for it, make use of GitHub Discussions to
|
||||
involve the community on the discussion.
|
||||
|
||||
## GitHub Issues
|
||||
|
||||
GitHub Issues are used to file bugs, work items, and feature requests with actionable items/issues.
|
||||
|
||||
When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
|
||||
|
||||
* A reproducible test case or series of steps
|
||||
* The version of the code being used
|
||||
* Any modifications you've made relevant to the bug
|
||||
* Anything unusual about your environment or deployment
|
||||
|
||||
## Miscellanea
|
||||
|
||||
Please add a single newline at the end of every file, as per the current coding style.
|
||||
|
||||
## Licensing
|
||||
|
||||
See the [LICENSE](https://github.com/clastix/capsule/blob/master/LICENSE) file for our project's licensing. We may ask you to confirm the licensing of your contribution.
|
||||
3
docs/content/contributing/index.md
Normal file
@@ -0,0 +1,3 @@
|
||||
# Contributing
|
||||
|
||||
Guidelines for community contribution.
|
||||
120
docs/content/general/getting-started.md
Normal file
@@ -0,0 +1,120 @@
|
||||
# Getting started
|
||||
|
||||
Thanks for giving Capsule a try.
|
||||
|
||||
## Installation
|
||||
|
||||
Make sure you have access to a Kubernetes cluster as administrator.
|
||||
|
||||
There are two ways to install Capsule:
|
||||
|
||||
* Use the [single YAML file installer](https://raw.githubusercontent.com/clastix/capsule/master/config/install.yaml)
|
||||
* Use the [Capsule Helm Chart](https://github.com/clastix/capsule/blob/master/charts/capsule/README.md)
|
||||
|
||||
### Install with the single YAML file installer
|
||||
|
||||
Ensure you have `kubectl` installed in your `PATH`, then apply the single YAML installer:
|
||||
|
||||
```
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/clastix/capsule/master/config/install.yaml
|
||||
```
|
||||
|
||||
It will install the Capsule controller in a dedicated namespace `capsule-system`.
|
||||
|
||||
### Install with Helm Chart
|
||||
|
||||
Please, refer to the instructions reported in the Capsule Helm Chart [README](https://github.com/clastix/capsule/blob/master/charts/capsule/README.md).
|
||||
|
||||
## Create your first Tenant
|
||||
|
||||
In Capsule, a _Tenant_ is an abstraction to group multiple namespaces in a single entity within a set of boundaries defined by the Cluster Administrator. The tenant is then assigned to a user or group of users, called the _Tenant Owner_.
|
||||
|
||||
Capsule defines a Tenant as a Custom Resource with cluster scope.
|
||||
|
||||
Create the tenant as cluster admin:
|
||||
|
||||
```yaml
|
||||
kubectl create -f - << EOF
|
||||
apiVersion: capsule.clastix.io/v1beta1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owners:
|
||||
- name: alice
|
||||
kind: User
|
||||
EOF
|
||||
```
|
||||
|
||||
You can check the tenant you just created:
|
||||
|
||||
```
|
||||
$ kubectl get tenants
|
||||
NAME STATE NAMESPACE QUOTA NAMESPACE COUNT NODE SELECTOR AGE
|
||||
oil Active 0 10s
|
||||
```
|
||||
|
||||
## Login as Tenant Owner
|
||||
|
||||
Each tenant comes with a delegated user or group of users acting as the tenant admin. In the Capsule jargon, this is called the _Tenant Owner_. Other users can operate inside a tenant with different levels of permissions and authorizations assigned directly by the Tenant Owner.
|
||||
|
||||
Capsule does not care about the authentication strategy used in the cluster and all the Kubernetes methods of [authentication](https://kubernetes.io/docs/reference/access-authn-authz/authentication/) are supported. The only requirement to use Capsule is to assign tenant users to the group defined by the `--capsule-user-group` option, which defaults to `capsule.clastix.io`.
|
||||
|
||||
Assignment to a group depends on the authentication strategy in your cluster.
|
||||
|
||||
For example, if you are using `capsule.clastix.io`, users authenticated through an _X.509_ certificate must have `capsule.clastix.io` as _Organization_: `-subj "/CN=${USER}/O=capsule.clastix.io"`
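For instance, a minimal sketch of generating such a certificate request with OpenSSL might look like this (the user name and file names are illustrative only):

```shell
# Create a key and a CSR whose Organization places the user in the Capsule group
$ openssl genrsa -out alice.key 2048
$ openssl req -new -key alice.key -out alice.csr -subj "/CN=alice/O=capsule.clastix.io"
```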
|
||||
|
||||
Users authenticated through an _OIDC token_ must have in their token:
|
||||
|
||||
```json
|
||||
...
|
||||
"users_groups": [
|
||||
"capsule.clastix.io",
|
||||
"other_group"
|
||||
]
|
||||
```
|
||||
|
||||
The [hack/create-user.sh](https://github.com/clastix/capsule/blob/master/hack/create-user.sh) script can help you set up a dummy `kubeconfig` for the `alice` user acting as owner of a tenant called `oil`:
|
||||
|
||||
```bash
|
||||
./hack/create-user.sh alice oil
|
||||
...
|
||||
certificatesigningrequest.certificates.k8s.io/alice-oil created
|
||||
certificatesigningrequest.certificates.k8s.io/alice-oil approved
|
||||
kubeconfig file is: alice-oil.kubeconfig
|
||||
to use it as alice export KUBECONFIG=alice-oil.kubeconfig
|
||||
```
|
||||
|
||||
Log in as the tenant owner:
|
||||
|
||||
```
|
||||
$ export KUBECONFIG=alice-oil.kubeconfig
|
||||
```
|
||||
|
||||
## Create namespaces
|
||||
|
||||
As tenant owner, you can create namespaces:
|
||||
|
||||
```
|
||||
$ kubectl create namespace oil-production
|
||||
$ kubectl create namespace oil-development
|
||||
```
|
||||
|
||||
And operate with full admin permissions:
|
||||
|
||||
```
|
||||
$ kubectl -n oil-development run nginx --image=docker.io/nginx
|
||||
$ kubectl -n oil-development get pods
|
||||
```
|
||||
|
||||
## Limiting access
|
||||
|
||||
Tenant Owners have full administrative permissions limited to only the namespaces in the assigned tenant. They can create any namespaced resource in their namespaces but they do not have access to cluster resources or resources belonging to other tenants they do not own:
|
||||
|
||||
```
|
||||
$ kubectl -n kube-system get pods
|
||||
Error from server (Forbidden): pods is forbidden:
|
||||
User "alice" cannot list resource "pods" in API group "" in the namespace "kube-system"
|
||||
```
|
||||
|
||||
See the [tutorial](/docs/general/tutorial) for more cool things you can do with Capsule.
|
||||
2
docs/content/general/index.md
Normal file
@@ -0,0 +1,2 @@
|
||||
# Documentation
|
||||
General documentation for Capsule Operator
|
||||
11
docs/content/general/lens.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# Capsule extension for Lens
|
||||
With Capsule extension for [Lens](https://github.com/lensapp/lens), a cluster administrator can easily manage from a single pane of glass all resources of a Kubernetes cluster, including all the Tenants created through the Capsule Operator.
|
||||
|
||||
## Features
|
||||
Capsule extension for Lens provides these capabilities:
|
||||
|
||||
- List all tenants
|
||||
- See tenant details and change them through the embedded Lens editor
|
||||
- Check Resources Quota and Budget at both the tenant and namespace level
|
||||
|
||||
Please, see the [README](https://github.com/clastix/capsule-lens-extension) for details about the installation of the Capsule Lens Extension.
|
||||
2284
docs/content/general/mtb.md
Normal file
420
docs/content/general/proxy.md
Normal file
@@ -0,0 +1,420 @@
|
||||
# Capsule Proxy
|
||||
|
||||
Capsule Proxy is an add-on for the Capsule Operator addressing some RBAC issues when enabling multi-tenancy in Kubernetes, since users cannot list the cluster-scoped resources they own.
|
||||
|
||||
Kubernetes RBAC cannot restrict listing to only the owned cluster-scoped resources, since there are no ACL-filtered APIs. For example:
|
||||
|
||||
```
|
||||
$ kubectl get namespaces
|
||||
Error from server (Forbidden): namespaces is forbidden:
|
||||
User "alice" cannot list resource "namespaces" in API group "" at the cluster scope
|
||||
```
|
||||
|
||||
However, the user can have permissions on some namespaces
|
||||
|
||||
```
|
||||
$ kubectl auth can-i [get|list|watch|delete] ns oil-production
|
||||
yes
|
||||
```
|
||||
|
||||
The reason, as the error message reports, is that the RBAC _list_ action is only available at the cluster scope and is not granted to users without appropriate permissions.
|
||||
|
||||
To overcome this problem, many Kubernetes distributions introduced mirrored custom resources supported by a custom set of ACL-filtered APIs. However, this radically changes the user experience of Kubernetes by introducing hard customizations that make it painful to move from one distribution to another.
|
||||
|
||||
With **Capsule**, we took a different approach. As one of the key goals, we want to keep the same user experience on all the distributions of Kubernetes. We want people to use the standard tools they already know and love, and it should just work.
|
||||
|
||||
## How it works
|
||||
|
||||
The `capsule-proxy` implements a simple reverse proxy that intercepts only specific requests to the APIs server and Capsule does all the magic behind the scenes.
|
||||
|
||||
Current implementation filters the following requests:
|
||||
|
||||
* `/api/scheduling.k8s.io/{v1}/priorityclasses{/name}`
|
||||
* `/api/v1/namespaces`
|
||||
* `/api/v1/nodes{/name}`
|
||||
* `/api/v1/pods?fieldSelector=spec.nodeName%3D{name}`
|
||||
* `/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/{name}`
|
||||
* `/apis/metrics.k8s.io/{v1beta1}/nodes{/name}`
|
||||
* `/apis/networking.k8s.io/{v1,v1beta1}/ingressclasses{/name}`
|
||||
* `/apis/storage.k8s.io/v1/storageclasses{/name}`
|
||||
|
||||
All other requests are proxied transparently to the APIs server, so no side effects are expected. We're planning to add new APIs in the future, so [PRs are welcome](https://github.com/clastix/capsule-proxy)!
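As a minimal sketch (the host, port, and token below are placeholders), a Tenant Owner could hit one of the filtered endpoints directly and get back only the owned resources:

```shell
# List only the Namespaces belonging to the authenticated Tenant Owner
$ curl -sk -H "Authorization: Bearer ${TOKEN}" https://capsule-proxy.example.com:9001/api/v1/namespaces
```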
|
||||
|
||||
## Installation
|
||||
|
||||
Capsule Proxy is an optional add-on of the main Capsule Operator, so make sure you have a working instance of Capsule before attempting to install it.
|
||||
Use the `capsule-proxy` only if you want Tenant Owners to list their own cluster-scoped resources.
|
||||
|
||||
The `capsule-proxy` can be deployed in standalone mode, e.g. running as a pod bridging any Kubernetes client to the APIs server.
|
||||
Optionally, it can be deployed as a sidecar container in the backend of a dashboard.
|
||||
|
||||
Running outside a Kubernetes cluster is also viable, although a valid `KUBECONFIG` file must be provided, using the environment variable `KUBECONFIG` or the default file in `$HOME/.kube/config`.
|
||||
|
||||
A Helm Chart is available [here](https://github.com/clastix/capsule-proxy/blob/master/charts/capsule-proxy/README.md).
|
||||
|
||||
Depending on your environment, you can expose the `capsule-proxy` by:
|
||||
|
||||
- Ingress
|
||||
- NodePort Service
|
||||
- LoadBalancer Service
|
||||
- HostPort
|
||||
- HostNetwork
|
||||
|
||||
Here is how it looks when exposed through an Ingress Controller:
|
||||
|
||||
```
|
||||
+-----------+ +-----------+ +-----------+
|
||||
kubectl ------>|:443 |--------->|:9001 |-------->|:6443 |
|
||||
+-----------+ +-----------+ +-----------+
|
||||
ingress-controller capsule-proxy kube-apiserver
|
||||
```
|
||||
|
||||
## CLI flags
|
||||
|
||||
- `capsule-configuration-name`: name of the `CapsuleConfiguration` resource which is containing the [Capsule configurations](/docs/general/references/#capsule-configuration) (default: `default`)
|
||||
- `capsule-user-group` (deprecated): old way to specify the user groups which request must be intercepted by the proxy
|
||||
- `ignored-user-group`: names of the groups which requests must be ignored and proxy-passed to the upstream server
|
||||
- `listening-port`: HTTP port the proxy listens to (default: `9001`)
|
||||
- `oidc-username-claim`: the OIDC field name used to identify the user (default: `preferred_username`), the proper value can be extracted from the Kubernetes API Server flags
|
||||
- `enable-ssl`: enable binding on HTTPS for secure communication, allowing client-based certificates, also known as mutual TLS (default: `true`)
|
||||
- `ssl-cert-path`: path to the TLS certificate, when TLS mode is enabled (default: `/opt/capsule-proxy/tls.crt`)
|
||||
- `ssl-key-path`: path to the TLS certificate key, when TLS mode is enabled (default: `/opt/capsule-proxy/tls.key`)
|
||||
- `rolebindings-resync-period`: resync period for RoleBinding resources reflector, lower values can help if you're facing [flaky etcd connection](https://github.com/clastix/capsule-proxy/issues/174) (default: `10h`)
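As a rough sketch combining some of the flags above (the values are the documented defaults and the binary name assumes the standalone deployment), an invocation might look like:

```shell
$ capsule-proxy \
    --capsule-configuration-name=default \
    --listening-port=9001 \
    --enable-ssl=true \
    --ssl-cert-path=/opt/capsule-proxy/tls.crt \
    --ssl-key-path=/opt/capsule-proxy/tls.key \
    --oidc-username-claim=preferred_username
```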
|
||||
|
||||
## User Authentication
|
||||
|
||||
The `capsule-proxy` intercepts all the requests from the `kubectl` client directed to the APIs Server. Users relying on TLS client-based authentication with a certificate and key are able to talk to the APIs Server, since the proxy forwards client certificates to the Kubernetes APIs server.
|
||||
|
||||
It is possible to protect the `capsule-proxy` using a certificate provided by Let's Encrypt. Keep in mind that, in this way, the TLS termination will be executed by the Ingress Controller, meaning that client certificate based authentication is stripped and not forwarded to the upstream.
|
||||
|
||||
If your requirement is exposing `capsule-proxy` through an Ingress, you must rely on token-based authentication, for example OIDC or Bearer tokens. Users providing tokens are always able to reach the APIs Server.

## Kubernetes dashboards integration

If you're using a client-only dashboard, for example [Lens](https://k8slens.dev/), the `capsule-proxy` can be used in the same way as with `kubectl`, since this kind of dashboard usually talks to the APIs Server using just a `kubeconfig` file.

![capsule-proxy-lens](./assets/capsule-proxy-lens.png)

For a web-based dashboard, like the [Kubernetes Dashboard](https://github.com/kubernetes/dashboard), the `capsule-proxy` can be deployed as a sidecar container in the backend, following the well-known cloud-native _Ambassador Pattern_.

![capsule-proxy-kubernetes-dashboard](./assets/capsule-proxy-kubernetes-dashboard.png)

## Tenant Owner Authorization

Each Tenant Owner can have their capabilities managed in a way pretty similar to standard Kubernetes RBAC.

```yaml
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: my-tenant
spec:
  owners:
  - kind: User
    name: alice
    proxySettings:
    - kind: IngressClasses
      operations:
      - List
```

The proxy setting `kind` is an __enum__ accepting the supported resources:

- `Nodes`
- `StorageClasses`
- `IngressClasses`
- `PriorityClasses`

Each Resource kind can be granted with several verbs, such as the following (see the patch sketch after this list):

- `List`
- `Update`
- `Delete`
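
For example, granting additional verbs to a Tenant Owner is a matter of updating the owner's `proxySettings`; a sketch using a merge patch (which replaces the whole `owners` list, so include every owner):

```bash
kubectl patch tenant my-tenant --type=merge -p '{
  "spec": {
    "owners": [
      {
        "kind": "User",
        "name": "alice",
        "proxySettings": [
          {"kind": "IngressClasses", "operations": ["List", "Update", "Delete"]}
        ]
      }
    ]
  }
}'
```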

### Namespaces

As tenant owner `alice`, you can use `kubectl` to create some namespaces:

```
$ kubectl --context alice-oidc@mycluster create namespace oil-production
$ kubectl --context alice-oidc@mycluster create namespace oil-development
$ kubectl --context alice-oidc@mycluster create namespace gas-marketing
```

and list only those namespaces:

```
$ kubectl --context alice-oidc@mycluster get namespaces
NAME              STATUS   AGE
gas-marketing     Active   2m
oil-development   Active   2m
oil-production    Active   2m
```

### Nodes

The Capsule Proxy gives the owners the ability to access the nodes matching the `.spec.nodeSelector` in the Tenant manifest:

```yaml
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
    proxySettings:
    - kind: Nodes
      operations:
      - List
  nodeSelector:
    kubernetes.io/hostname: capsule-gold-qwerty
```

```bash
$ kubectl --context alice-oidc@mycluster get nodes
NAME                  STATUS   ROLES    AGE   VERSION
capsule-gold-qwerty   Ready    <none>   43h   v1.19.1
```

> Warning: when no `nodeSelector` is specified, the tenant owner has access to all the nodes, according to the permissions listed in the `proxySettings` specs.

### Special routes for kubectl describe

When issuing a `kubectl describe node`, some other endpoints are put in place:

* `api/v1/pods?fieldSelector=spec.nodeName%3D{name}`
* `/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/{name}`

These are mandatory in order to retrieve the list of the Pods running on the required node, and to provide info about its lease status.
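
These endpoints can also be exercised directly against the proxy; a sketch with `curl`, assuming token authentication, the default listening port, and the node name used in the Nodes example above:

```bash
PROXY=https://localhost:9001
TOKEN="<your token>"
NODE=capsule-gold-qwerty

# Pods scheduled on the node (used by kubectl describe node)
curl -sk -H "Authorization: Bearer $TOKEN" \
  "$PROXY/api/v1/pods?fieldSelector=spec.nodeName%3D$NODE"

# Lease status of the node
curl -sk -H "Authorization: Bearer $TOKEN" \
  "$PROXY/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/$NODE"
```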

### Storage Classes

A Tenant may be limited to use a set of allowed Storage Class resources, as follows.

```yaml
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
    proxySettings:
    - kind: StorageClasses
      operations:
      - List
  storageClasses:
    allowed:
    - custom
    allowedRegex: "\\w+fs"
```

In the Kubernetes cluster we could have more Storage Class resources, some of them forbidden and not usable by the Tenant owner.

```bash
$ kubectl --context admin@mycluster get storageclasses
NAME                PROVISIONER              RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
cephfs              rook.io/cephfs           Delete          WaitForFirstConsumer   false                  21h
custom              custom.tls/provisioner   Delete          WaitForFirstConsumer   false                  43h
default(standard)   rancher.io/local-path    Delete          WaitForFirstConsumer   false                  43h
glusterfs           rook.io/glusterfs        Delete          WaitForFirstConsumer   false                  54m
zol                 zfs-on-linux/zfs         Delete          WaitForFirstConsumer   false                  54m
```

The expected output using `capsule-proxy` is the retrieval of the `custom` Storage Class as well as the other ones matching the regex `\w+fs`.

```bash
$ kubectl --context alice-oidc@mycluster get storageclasses
NAME        PROVISIONER              RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
cephfs      rook.io/cephfs           Delete          WaitForFirstConsumer   false                  21h
custom      custom.tls/provisioner   Delete          WaitForFirstConsumer   false                  43h
glusterfs   rook.io/glusterfs        Delete          WaitForFirstConsumer   false                  54m
```

> The `name` label reflecting the resource name is mandatory, otherwise filtering of resources cannot be put in place.

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  labels:
    name: cephfs
  name: cephfs
provisioner: cephfs
```
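
If an existing Storage Class is missing that label, it can be added in place; a minimal sketch using the `cephfs` class from the listing above:

```bash
# Add the mandatory name label to an existing StorageClass
kubectl label storageclass cephfs name=cephfs
```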

### Ingress Classes

As with Storage Classes, Ingress Classes can be enforced as well.

```yaml
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
    proxySettings:
    - kind: IngressClasses
      operations:
      - List
  ingressOptions:
    allowedClasses:
      allowed:
      - custom
      allowedRegex: "\\w+-lb"
```

In the Kubernetes cluster we could have more Ingress Class resources, some of them forbidden and not usable by the Tenant owner.

```bash
$ kubectl --context admin@mycluster get ingressclasses
NAME              CONTROLLER             PARAMETERS                                      AGE
custom            example.com/custom     IngressParameters.k8s.example.com/custom        24h
external-lb       example.com/external   IngressParameters.k8s.example.com/external-lb   2s
haproxy-ingress   haproxy.tech/ingress                                                   4d
internal-lb       example.com/internal   IngressParameters.k8s.example.com/internal-lb   15m
nginx             nginx.plus/ingress                                                     5d
```

The expected output using `capsule-proxy` is the retrieval of the `custom` Ingress Class as well as the other ones matching the regex `\w+-lb`.

```bash
$ kubectl --context alice-oidc@mycluster get ingressclasses
NAME          CONTROLLER             PARAMETERS                                      AGE
custom        example.com/custom     IngressParameters.k8s.example.com/custom        24h
external-lb   example.com/external   IngressParameters.k8s.example.com/external-lb   2s
internal-lb   example.com/internal   IngressParameters.k8s.example.com/internal-lb   15m
```

> The `name` label reflecting the resource name is mandatory, otherwise filtering of resources cannot be put in place.

```yaml
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  labels:
    name: external-lb
  name: external-lb
spec:
  controller: example.com/ingress-controller
  parameters:
    apiGroup: k8s.example.com
    kind: IngressParameters
    name: external-lb
```

### Priority Classes

Allowed PriorityClasses assigned to a Tenant Owner can be enforced as follows:

```yaml
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
    proxySettings:
    - kind: PriorityClasses
      operations:
      - List
  priorityClasses:
    allowed:
    - custom
    allowedRegex: "\\w+priority"
```

In the Kubernetes cluster we could have more PriorityClass resources, some of them forbidden and not usable by the Tenant owner.

```bash
$ kubectl --context admin@mycluster get priorityclasses.scheduling.k8s.io
NAME                      VALUE        GLOBAL-DEFAULT   AGE
custom                    1000         false            18s
maxpriority               1000         false            18s
minpriority               1000         false            18s
nonallowed                1000         false            8m54s
system-cluster-critical   2000000000   false            3h40m
system-node-critical      2000001000   false            3h40m
```

The expected output using `capsule-proxy` is the retrieval of the `custom` PriorityClass as well as the other ones matching the regex `\w+priority`.

```bash
$ kubectl --context alice-oidc@mycluster get priorityclasses.scheduling.k8s.io
NAME          VALUE   GLOBAL-DEFAULT   AGE
custom        1000    false            18s
maxpriority   1000    false            18s
minpriority   1000    false            18s
```

> The `name` label reflecting the resource name is mandatory, otherwise filtering of resources cannot be put in place.

```yaml
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  labels:
    name: custom
  name: custom
value: 1000
globalDefault: false
description: "Priority class for Tenants"
```

## HTTP support

Capsule Proxy supports both `https` and `http`. The latter is not recommended, but we understand it can be useful in some use cases (e.g. development, or running behind a TLS-terminating reverse proxy). Since the default behaviour is to work over `https`, you need to pass the flag `--enable-ssl=false` if you really want to work over `http`.

Once the `capsule-proxy` is working over `http`, requests must provide authentication using an allowed Bearer Token.

For example:

```bash
$ TOKEN=<type your TOKEN>
$ curl -H "Authorization: Bearer $TOKEN" http://localhost:9001/api/v1/namespaces
```

> NOTE: `kubectl` will not work against an `http` server.

## Contributing

`capsule-proxy` is open-source software released under the Apache 2 [license](https://github.com/clastix/capsule-proxy/blob/master/LICENSE).

Contributing guidelines are available [here](https://github.com/clastix/capsule-proxy/blob/master/CONTRIBUTING.md).

242 docs/content/general/references.md Normal file
@@ -0,0 +1,242 @@

# Reference

Reference document for the Capsule Operator configuration.

## Custom Resource Definition

The Capsule operator uses a Custom Resource Definition (CRD) for _Tenants_. Tenants are cluster-wide resources, so you need cluster-level permissions to work with tenants. You can learn about the tenant CRD with the `kubectl explain` command:

```
kubectl explain tenant

KIND:     Tenant
VERSION:  capsule.clastix.io/v1beta1

DESCRIPTION:
     Tenant is the Schema for the tenants API

FIELDS:
   apiVersion   <string>
     APIVersion defines the versioned schema of this representation of an object.
     Servers should convert recognized schemas to the latest internal value,
     and may reject unrecognized values. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

   kind         <string>
     Kind is a string value representing the REST resource this object represents.
     Servers may infer this from the endpoint the client submits requests to.
     Cannot be updated. In CamelCase. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

   metadata     <Object>
     Standard object's metadata. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata

   spec         <Object>
     TenantSpec defines the desired state of Tenant

   status       <Object>
     Returns the observed state of the Tenant
```

For the Tenant spec:

```
kubectl explain tenant.spec

KIND:     Tenant
VERSION:  capsule.clastix.io/v1beta1

RESOURCE: spec <Object>

DESCRIPTION:
     TenantSpec defines the desired state of Tenant

FIELDS:
   additionalRoleBindings   <[]Object>
     Specifies additional RoleBindings assigned to the Tenant. Capsule will
     ensure that all namespaces in the Tenant always contain the RoleBinding for
     the given ClusterRole. Optional.

   containerRegistries      <Object>
     Specifies the trusted Image Registries assigned to the Tenant. Capsule
     assures that all Pods resources created in the Tenant can use only one of
     the allowed trusted registries. Optional.

   imagePullPolicies        <[]string>
     Specify the allowed values for the imagePullPolicies option in Pod
     resources. Capsule assures that all Pod resources created in the Tenant can
     use only one of the allowed policy. Optional.

   ingressOptions           <Object>
     Specifies options for the Ingress resources, such as allowed hostnames and
     IngressClass. Optional.

   limitRanges              <Object>
     Specifies the resource min/max usage restrictions to the Tenant. The assigned
     values are inherited by any namespace created in the Tenant. Optional.

   namespaceOptions         <Object>
     Specifies options for the Namespaces, such as additional metadata or
     maximum number of namespaces allowed for that Tenant. Once the namespace
     quota assigned to the Tenant has been reached, the Tenant owner cannot
     create further namespaces. Optional.

   networkPolicies          <Object>
     Specifies the NetworkPolicies assigned to the Tenant. The assigned
     NetworkPolicies are inherited by any namespace created in the Tenant.
     Optional.

   nodeSelector             <map[string]string>
     Specifies the label to control the placement of pods on a given pool of
     worker nodes. All namespaces created within the Tenant will have the node
     selector annotation. This annotation tells the Kubernetes scheduler to
     place pods on the nodes having the selector label. Optional.

   owners                   <[]Object> -required-
     Specifies the owners of the Tenant. Mandatory.

   priorityClasses          <Object>
     Specifies the allowed priorityClasses assigned to the Tenant. Capsule
     assures that all pods created in the Tenant can use only one
     of the allowed priorityClasses. Optional.

   resourceQuotas           <Object>
     Specifies a list of ResourceQuota resources assigned to the Tenant. The
     assigned values are inherited by any namespace created in the Tenant. The
     Capsule operator aggregates ResourceQuota at Tenant level, so that the hard
     quota is never crossed for the given Tenant. This permits the Tenant owner
     to consume resources in the Tenant regardless of the namespace. Optional.

   serviceOptions           <Object>
     Specifies options for the Service, such as additional metadata or block of
     certain type of Services. Optional.

   storageClasses           <Object>
     Specifies the allowed StorageClasses assigned to the Tenant. Capsule
     assures that all PersistentVolumeClaim resources created in the Tenant can
     use only one of the allowed StorageClasses. Optional.
```

and the Tenant status:

```
kubectl explain tenant.status

KIND:     Tenant
VERSION:  capsule.clastix.io/v1beta1

RESOURCE: status <Object>

DESCRIPTION:
     Returns the observed state of the Tenant

FIELDS:
   namespaces   <[]string>
     List of namespaces assigned to the Tenant.

   size         <integer> -required-
     How many namespaces are assigned to the Tenant.

   state        <string> -required-
     The operational state of the Tenant. Possible values are "Active",
     "Cordoned".
```

## Capsule Configuration

The Capsule configuration can be piloted by a Custom Resource Definition named `CapsuleConfiguration`.

```yaml
apiVersion: capsule.clastix.io/v1alpha1
kind: CapsuleConfiguration
metadata:
  name: default
  annotations:
    capsule.clastix.io/ca-secret-name: "capsule-ca"
    capsule.clastix.io/tls-secret-name: "capsule-tls"
    capsule.clastix.io/mutating-webhook-configuration-name: "capsule-mutating-webhook-configuration"
    capsule.clastix.io/validating-webhook-configuration-name: "capsule-validating-webhook-configuration"
spec:
  userGroups: ["capsule.clastix.io"]
  forceTenantPrefix: false
  protectedNamespaceRegex: ""
```

Option | Description | Default
--- | --- | ---
`.spec.forceTenantPrefix` | Force the tenant name as prefix for namespaces: `<tenant_name>-<namespace>`. | `false`
`.spec.userGroups` | Array of Capsule groups to which all tenant owners must belong. | `[capsule.clastix.io]`
`.spec.protectedNamespaceRegex` | Disallows creation of namespaces matching the passed regexp. | `null`
`.metadata.annotations.capsule.clastix.io/ca-secret-name` | Set the Capsule Certificate Authority secret name. | `capsule-ca`
`.metadata.annotations.capsule.clastix.io/tls-secret-name` | Set the Capsule TLS secret name. | `capsule-tls`
`.metadata.annotations.capsule.clastix.io/mutating-webhook-configuration-name` | Set the MutatingWebhookConfiguration name. | `mutating-webhook-configuration-name`
`.metadata.annotations.capsule.clastix.io/validating-webhook-configuration-name` | Set the ValidatingWebhookConfiguration name. | `validating-webhook-configuration-name`

Upon installation using Kustomize or Helm, a `capsule-default` resource will be created.
The reference to this configuration is managed by the CLI flag `--configuration-name`.
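
As an example, a single option can be toggled on the installed configuration with a merge patch; `capsule-default` is the resource created by the default installation:

```bash
# Enable tenant-name prefixing on the default configuration
kubectl patch capsuleconfiguration capsule-default --type=merge \
  -p '{"spec":{"forceTenantPrefix":true}}'
```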

## Capsule Permissions

In the current implementation, the Capsule operator requires cluster admin permissions to fully operate. Make sure you deploy Capsule having access to the default `cluster-admin` ClusterRole.

## Admission Controllers

Capsule implements Kubernetes multi-tenancy capabilities using a minimum set of standard [Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/) enabled on the Kubernetes APIs server.

Here is the list of required Admission Controllers you have to enable to get full support from Capsule (see the API server flag sketch after this list):

* PodNodeSelector
* LimitRanger
* ResourceQuota
* MutatingAdmissionWebhook
* ValidatingAdmissionWebhook
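
On clusters where you control the API server flags, a sketch of the corresponding `kube-apiserver` option (several of these plugins are typically already enabled by default):

```bash
# kube-apiserver flag enabling the admission plugins required by Capsule
--enable-admission-plugins=PodNodeSelector,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook
```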

In addition to the required controllers above, Capsule implements its own set through the [Dynamic Admission Controller](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) mechanism, providing callbacks to add further validation or resource patching.

To see the Admission Controls installed by Capsule:

```
$ kubectl get ValidatingWebhookConfiguration
NAME                                        WEBHOOKS   AGE
capsule-validating-webhook-configuration    8          2h

$ kubectl get MutatingWebhookConfiguration
NAME                                      WEBHOOKS   AGE
capsule-mutating-webhook-configuration    1          2h
```

## Command Options

The Capsule operator provides the following command options:

Option | Description | Default
--- | --- | ---
`--metrics-addr` | The address and port where `/metrics` are exposed. | `127.0.0.1:8080`
`--enable-leader-election` | Start a leader election client and gain leadership before executing the main loop. | `true`
`--zap-log-level` | The log verbosity with a value from 1 to 10 or the basic keywords. | `4`
`--zap-devel` | The flag to get the stack traces for deep debugging. | `null`
`--configuration-name` | The name of the `CapsuleConfiguration` resource to use; the default one is installed automatically. | `capsule-default`
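
To check which of these options your running operator was started with, you can inspect the container arguments; the Deployment name and namespace come from the Created Resources list below:

```bash
kubectl -n capsule-system get deployment capsule-controller-manager \
  -o jsonpath='{.spec.template.spec.containers[*].args}'
```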

## Created Resources

Once installed, the Capsule operator creates the following resources in your cluster:

```
NAMESPACE        RESOURCE
                 namespace/capsule-system
                 customresourcedefinition.apiextensions.k8s.io/tenants.capsule.clastix.io
                 customresourcedefinition.apiextensions.k8s.io/capsuleconfigurations.capsule.clastix.io
                 clusterrole.rbac.authorization.k8s.io/capsule-proxy-role
                 clusterrole.rbac.authorization.k8s.io/capsule-metrics-reader
                 capsuleconfiguration.capsule.clastix.io/capsule-default
                 mutatingwebhookconfiguration.admissionregistration.k8s.io/capsule-mutating-webhook-configuration
                 validatingwebhookconfiguration.admissionregistration.k8s.io/capsule-validating-webhook-configuration
capsule-system   clusterrolebinding.rbac.authorization.k8s.io/capsule-manager-rolebinding
capsule-system   clusterrolebinding.rbac.authorization.k8s.io/capsule-proxy-rolebinding
capsule-system   secret/capsule-ca
capsule-system   secret/capsule-tls
capsule-system   service/capsule-controller-manager-metrics-service
capsule-system   service/capsule-webhook-service
capsule-system   deployment.apps/capsule-controller-manager
```

1621 docs/content/general/tutorial.md Normal file

BIN docs/content/guides/assets/datasource.png Executable file (4.5 KiB)
BIN docs/content/guides/assets/manager-controllers.png Executable file (28 KiB)
BIN docs/content/guides/assets/prometheus_targets.png Executable file (30 KiB)
BIN docs/content/guides/assets/rest-client-error-rate.png Executable file (63 KiB)