Compare commits: v0.1.0-rc5...v0.1.1-rc0
107 commits
Commits in this range (SHA1):

b28b98a7bc f6bf0ca446 1081bad7cb 79372c7332 4e8faaf845 d1b008972c a14c7609df 03456c0b54 ddfe2219a0 6b68363a46
357834c5b9 085d9f6503 196e3c910d 0039c91c23 26965a5ea2 422b6598ba 61e6ab4088 94c6a64fcb 75ebb571e4 8f3b3eac29
7979c256d9 bdafbcf90a d0530bbbe3 1035afc7fe 67046c5b54 564c4db81a 30c3ab078d e9b803b9cd cb8e504832 713867d916
23e55c685c 6393541818 c140ab076e 6b629777b7 5554ed5f32 00ef9a2f67 46c2f0e997 0c0a90a934 9d65013a22 60ab33337d
225d671301 7538926bae 0de0eca72a d5a702ceae a2fda44110 06330cf992 1ec9936158 694b519af8 0b34f04291 a702ef2af2
04d91af9f5 8949be7497 df08c9e63e 07daffd669 3a42b90221 09277e9f3d 47794c0cf8 e24394f329 01053d5deb b749e34547
82480f3afd 88a9c242a4 651c62ff4a dcb8b784d5 7a698633d7 894ea5016b e4e3283b90 007f0083c2 bc6fc920d3 01b511b509
6223b1c297 d5158f06be 047f4a0ff7 71cdb45925 9182895811 2eceb0935a 8ead555743 57bf3d1c1b bb58e90f5d f8fa87a998
b3658b7bfc 54d0201161 44ffe0ddf5 491ab71842 4e9dbf8690 34614015a0 737fb26e39 b56015922f ddb9ffd79e cae65c9f84
befcf65bdd e1d98334a2 848c6d99c2 bd12068397 4604e44c37 31863b53af 7a055fcb9f 29ab5ca64a c52f7844db 9244122d42
f883e7b662 2f5f31b678 e7ef9642ad 34f73af5c4 18912a002b d43ad2f9f8 9a595877ce
.github/workflows/ci.yml (22 changes, vendored)

@@ -7,6 +7,15 @@ on:
    branches: [ "*" ]

jobs:
  commit_lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - uses: wagoid/commitlint-github-action@v2
        with:
          firstParent: true
  golangci:
    name: lint
    runs-on: ubuntu-latest
@@ -25,18 +34,9 @@ jobs:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Cache Go modules
        uses: actions/cache@v1
        env:
          cache-name: go-mod
      - uses: actions/setup-go@v2
        with:
          path: |
            ~/go/pkg/mod
            /home/runner/work/capsule/capsule
          key: ${{ runner.os }}-build-${{ env.cache-name }}
          restore-keys: |
            ${{ runner.os }}-build-
            ${{ runner.os }}-
          go-version: '^1.16'
      - run: make installer
      - name: Checking if YAML installer file is not aligned
        run: if [[ $(git diff | wc -l) -gt 0 ]]; then echo ">>> Untracked generated files have not been committed" && git --no-pager diff && exit 1; fi
.github/workflows/docker-ci.yml (50 changes, vendored)

@@ -10,12 +10,27 @@ jobs:
    runs-on: ubuntu-20.04
    steps:

      -
        name: Checkout
      - name: Checkout
        uses: actions/checkout@v2

      -
        name: Docker meta
      - name: Generate build-args
        id: build-args
        run: |
          # Declare vars for internal use
          VERSION=$(git describe --abbrev=0 --tags)
          GIT_HEAD_COMMIT=$(git rev-parse --short HEAD)
          GIT_TAG_COMMIT=$(git rev-parse --short $VERSION)
          GIT_MODIFIED_1=$(git diff $GIT_HEAD_COMMIT $GIT_TAG_COMMIT --quiet && echo "" || echo ".dev")
          GIT_MODIFIED_2=$(git diff --quiet && echo "" || echo ".dirty")
          # Export to GH_ENV
          echo "GIT_LAST_TAG=$VERSION" >> $GITHUB_ENV
          echo "GIT_HEAD_COMMIT=$GIT_HEAD_COMMIT" >> $GITHUB_ENV
          echo "GIT_TAG_COMMIT=$GIT_TAG_COMMIT" >> $GITHUB_ENV
          echo "GIT_MODIFIED=$(echo "$GIT_MODIFIED_1""$GIT_MODIFIED_2")" >> $GITHUB_ENV
          echo "GIT_REPO=$(git config --get remote.origin.url)" >> $GITHUB_ENV
          echo "BUILD_DATE=$(git log -1 --format="%at" | xargs -I{} date -d @{} +%Y-%m-%dT%H:%M:%S)" >> $GITHUB_ENV

      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v3
        with:
@@ -26,22 +41,19 @@ jobs:
          flavor: |
            latest=false

      -
        name: Set up QEMU
      - name: Set up QEMU
        id: qemu
        uses: docker/setup-qemu-action@v1
        with:
          platforms: arm64,arm

      -
        name: Set up Docker Buildx
      - name: Set up Docker Buildx
        id: buildx
        uses: docker/setup-buildx-action@v1
        with:
          install: true
        uses: docker/setup-buildx-action@v1

      -
        name: Inspect builder
      - name: Inspect builder
        run: |
          echo "Name: ${{ steps.buildx.outputs.name }}"
          echo "Endpoint: ${{ steps.buildx.outputs.endpoint }}"
@@ -49,16 +61,14 @@ jobs:
          echo "Flags: ${{ steps.buildx.outputs.flags }}"
          echo "Platforms: ${{ steps.buildx.outputs.platforms }}"

      -
        name: Login to quay.io Container Registry
      - name: Login to quay.io Container Registry
        uses: docker/login-action@v1
        with:
          registry: quay.io
          username: ${{ github.repository_owner }}+github
          password: ${{ secrets.BOT_QUAY_IO }}

      -
        name: Build and push
      - name: Build and push
        id: build-release
        uses: docker/build-push-action@v2
        with:
@@ -67,7 +77,13 @@ jobs:
          platforms: linux/amd64,linux/arm64,linux/arm
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          build-args: |
            GIT_HEAD_COMMIT=${{ env.GIT_HEAD_COMMIT }}
            GIT_TAG_COMMIT=${{ env.GIT_TAG_COMMIT }}
            GIT_REPO=${{ env.GIT_REPO }}
            GIT_LAST_TAG=${{ env.GIT_LAST_TAG }}
            GIT_MODIFIED=${{ env.GIT_MODIFIED }}
            BUILD_DATE=${{ env.BUILD_DATE }}

      -
        name: Image digest
      - name: Image digest
        run: echo ${{ steps.build-release.outputs.digest }}
.github/workflows/e2e.yml (19 changes, vendored)

@@ -11,25 +11,15 @@ jobs:
    name: Kubernetes
    strategy:
      matrix:
        k8s-version: ['v1.16.15', 'v1.17.11', 'v1.18.8', 'v1.19.4', 'v1.20.0']
        k8s-version: ['v1.16.15', 'v1.17.11', 'v1.18.8', 'v1.19.4', 'v1.20.7', 'v1.21.2', 'v1.22.0']
    runs-on: ubuntu-18.04
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Cache Go modules and Docker images
        uses: actions/cache@v1
        env:
          cache-name: gomod-docker
      - uses: actions/setup-go@v2
        with:
          path: |
            ~/go/pkg/mod
            /var/lib/docker
            /home/runner/work/capsule/capsule
          key: ${{ matrix.k8s-version }}-build-${{ env.cache-name }}
          restore-keys: |
            ${{ matrix.k8s-version }}-build-
            ${{ matrix.k8s-version }}-
          go-version: '^1.16'
      - run: make manifests
      - name: Checking if manifests are disaligned
        run: test -z "$(git diff 2> /dev/null)"
@@ -39,10 +29,11 @@ jobs:
        run: go get github.com/onsi/ginkgo/ginkgo
      - uses: actions/setup-go@v2
        with:
          go-version: '^1.13.8'
          go-version: '^1.16'
      - uses: engineerd/setup-kind@v0.5.0
        with:
          skipClusterCreation: true
          version: v0.11.1
      - uses: azure/setup-helm@v1
        with:
          version: 3.3.4
.github/workflows/helm.yml (2 changes, vendored)

@@ -5,6 +5,8 @@ on:
    branches: [ "*" ]
  pull_request:
    branches: [ "*" ]
  create:
    branches: [ "*" ]

jobs:
  lint:
Makefile (2 changes)

@@ -1,5 +1,5 @@
# Current Operator version
VERSION ?= $$(git describe --abbrev=0 --tags)
VERSION ?= $$(git describe --abbrev=0 --tags --match "v*")

# Default bundle image tag
BUNDLE_IMG ?= quay.io/clastix/capsule:$(VERSION)-bundle
README.md (66 changes)

@@ -13,7 +13,7 @@

---

# Kubernetes multi-tenancy made simple
# Kubernetes multi-tenancy made easy
**Capsule** helps to implement a multi-tenancy and policy-based environment in your Kubernetes cluster. It is not intended to be yet another _PaaS_, instead, it has been designed as a micro-services-based ecosystem with the minimalist approach, leveraging only on upstream Kubernetes.

# What's the problem with the current status?
@@ -71,36 +71,24 @@ Clone this repository and move to the repo folder:

```
$ kubectl apply -f https://raw.githubusercontent.com/clastix/capsule/master/config/install.yaml
namespace/capsule-system created
customresourcedefinition.apiextensions.k8s.io/capsuleconfigurations.capsule.clastix.io created
customresourcedefinition.apiextensions.k8s.io/tenants.capsule.clastix.io created
clusterrolebinding.rbac.authorization.k8s.io/capsule-manager-rolebinding created
secret/capsule-ca created
secret/capsule-tls created
service/capsule-controller-manager-metrics-service created
service/capsule-webhook-service created
deployment.apps/capsule-controller-manager created
capsuleconfiguration.capsule.clastix.io/capsule-default created
mutatingwebhookconfiguration.admissionregistration.k8s.io/capsule-mutating-webhook-configuration created
validatingwebhookconfiguration.admissionregistration.k8s.io/capsule-validating-webhook-configuration created
```

It will install the Capsule controller in a dedicated namespace `capsule-system`.

## How to create Tenants
Use the scaffold [Tenant](config/samples/capsule_v1alpha1_tenant.yaml) and simply apply as cluster admin.
Use the scaffold [Tenant](config/samples/capsule_v1beta1_tenant.yaml) and simply apply as cluster admin.

```
$ kubectl apply -f config/samples/capsule_v1alpha1_tenant.yaml
tenant.capsule.clastix.io/oil created
$ kubectl apply -f config/samples/capsule_v1beta1_tenant.yaml
tenant.capsule.clastix.io/gas created
```

You can check the tenant just created as

```
$ kubectl get tenants
NAME   NAMESPACE QUOTA   NAMESPACE COUNT   OWNER NAME   OWNER KIND   NODE SELECTOR   AGE
oil    3                 0                 alice        User                         1m
NAME   STATE    NAMESPACE QUOTA   NAMESPACE COUNT   NODE SELECTOR                  AGE
gas    Active   3                 0                 {"kubernetes.io/os":"linux"}   25s
```

## Tenant owners
@@ -112,52 +100,46 @@ Assignment to a group depends on the authentication strategy in your cluster.

For example, if you are using `capsule.clastix.io`, users authenticated through a _X.509_ certificate must have `capsule.clastix.io` as _Organization_: `-subj "/CN=${USER}/O=capsule.clastix.io"`

Users authenticated through an _OIDC token_ must have
Users authenticated through an _OIDC token_ must have in their token:

```json
...
"users_groups": [
  "capsule.clastix.io",
  "other_group"
  "capsule.clastix.io",
  "other_group"
]
```

in their token.

The [hack/create-user.sh](hack/create-user.sh) can help you set up a dummy `kubeconfig` for the `alice` user acting as owner of a tenant called `oil`
The [hack/create-user.sh](hack/create-user.sh) can help you set up a dummy `kubeconfig` for the `bob` user acting as owner of a tenant called `gas`

```bash
./hack/create-user.sh alice oil
creating certs in TMPDIR /tmp/tmp.4CLgpuime3
Generating RSA private key, 2048 bit long modulus (2 primes)
............+++++
........................+++++
e is 65537 (0x010001)
certificatesigningrequest.certificates.k8s.io/alice-oil created
certificatesigningrequest.certificates.k8s.io/alice-oil approved
kubeconfig file is: alice-oil.kubeconfig
to use it as alice export KUBECONFIG=alice-oil.kubeconfig
./hack/create-user.sh bob gas
...
certificatesigningrequest.certificates.k8s.io/bob-gas created
certificatesigningrequest.certificates.k8s.io/bob-gas approved
kubeconfig file is: bob-gas.kubeconfig
to use it as bob export KUBECONFIG=bob-gas.kubeconfig
```

## Working with Tenants
Log in to the Kubernetes cluster as `alice` tenant owner
Log in to the Kubernetes cluster as `bob` tenant owner

```
$ export KUBECONFIG=alice-oil.kubeconfig
$ export KUBECONFIG=bob-gas.kubeconfig
```

and create a couple of new namespaces

```
$ kubectl create namespace oil-production
$ kubectl create namespace oil-development
$ kubectl create namespace gas-production
$ kubectl create namespace gas-development
```

As user `alice` you can operate with fully admin permissions:
As user `bob` you can operate with fully admin permissions:

```
$ kubectl -n oil-development run nginx --image=docker.io/nginx
$ kubectl -n oil-development get pods
$ kubectl -n gas-development run nginx --image=docker.io/nginx
$ kubectl -n gas-development get pods
```

but limited to only your own namespaces:
@@ -165,7 +147,7 @@ but limited to only your own namespaces:

```
$ kubectl -n kube-system get pods
Error from server (Forbidden): pods is forbidden:
User "alice" cannot list resource "pods" in API group "" in the namespace "kube-system"
User "bob" cannot list resource "pods" in API group "" in the namespace "kube-system"
```

# Documentation
@@ -8,7 +8,6 @@ import (
)

// CapsuleConfigurationSpec defines the Capsule configuration
// nolint:maligned
type CapsuleConfigurationSpec struct {
    // Names of the groups for Capsule users.
    // +kubebuilder:default={capsule.clastix.io}
@@ -19,15 +18,6 @@ type CapsuleConfigurationSpec struct {
    ForceTenantPrefix bool `json:"forceTenantPrefix,omitempty"`
    // Disallow creation of namespaces, whose name matches this regexp
    ProtectedNamespaceRegexpString string `json:"protectedNamespaceRegex,omitempty"`
    // When defining the exact match for allowed Ingress hostnames at Tenant level, a collision is not allowed.
    // Toggling this, Capsule will not check if a hostname collision is in place, allowing the creation of
    // two or more Tenant resources although sharing the same allowed hostname(s).
    //
    // The JSON path of the resource is: /spec/ingressHostnames/allowed
    AllowTenantIngressHostnamesCollision bool `json:"allowTenantIngressHostnamesCollision,omitempty"`
    // Allow the collision of Ingress resource hostnames across all the Tenants.
    // +kubebuilder:default=true
    AllowIngressHostnameCollision bool `json:"allowIngressHostnameCollision,omitempty"`
}

// +kubebuilder:object:root=true
@@ -17,6 +17,8 @@ import (
)

const (
    resourceQuotaScopeAnnotation = "capsule.clastix.io/resource-quota-scope"

    podAllowedImagePullPolicyAnnotation = "capsule.clastix.io/allowed-image-pull-policy"

    podPriorityAllowedAnnotation = "priorityclass.capsule.clastix.io/allowed"
@@ -24,6 +26,7 @@ const (

    enableNodePortsAnnotation = "capsule.clastix.io/enable-node-ports"
    enableExternalNameAnnotation = "capsule.clastix.io/enable-external-name"
    enableLoadBalancerAnnotation = "capsule.clastix.io/enable-loadbalancer-service"

    ownerGroupsAnnotation = "owners.capsule.clastix.io/group"
    ownerUsersAnnotation = "owners.capsule.clastix.io/user"
@@ -41,6 +44,8 @@ const (
    enablePriorityClassListingAnnotation = "capsule.clastix.io/enable-priorityclass-listing"
    enablePriorityClassUpdateAnnotation = "capsule.clastix.io/enable-priorityclass-update"
    enablePriorityClassDeletionAnnotation = "capsule.clastix.io/enable-priorityclass-deletion"

    ingressHostnameCollisionScope = "ingress.capsule.clastix.io/hostname-collision-scope"
)

func (t *Tenant) convertV1Alpha1OwnerToV1Beta1() capsulev1beta1.OwnerListSpec {
@@ -132,23 +137,32 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
    dst.ObjectMeta = t.ObjectMeta

    // Spec
    dst.Spec.NamespaceQuota = t.Spec.NamespaceQuota
    if t.Spec.NamespaceQuota != nil {
        if dst.Spec.NamespaceOptions == nil {
            dst.Spec.NamespaceOptions = &capsulev1beta1.NamespaceOptions{}
        }
        dst.Spec.NamespaceOptions.Quota = t.Spec.NamespaceQuota
    }

    dst.Spec.NodeSelector = t.Spec.NodeSelector

    dst.Spec.Owners = t.convertV1Alpha1OwnerToV1Beta1()

    if t.Spec.NamespacesMetadata != nil {
        dst.Spec.NamespacesMetadata = &capsulev1beta1.AdditionalMetadataSpec{
            AdditionalLabels: t.Spec.NamespacesMetadata.AdditionalLabels,
            AdditionalAnnotations: t.Spec.NamespacesMetadata.AdditionalAnnotations,
        if dst.Spec.NamespaceOptions == nil {
            dst.Spec.NamespaceOptions = &capsulev1beta1.NamespaceOptions{}
        }
        dst.Spec.NamespaceOptions.AdditionalMetadata = &capsulev1beta1.AdditionalMetadataSpec{
            Labels: t.Spec.NamespacesMetadata.AdditionalLabels,
            Annotations: t.Spec.NamespacesMetadata.AdditionalAnnotations,
        }
    }
    if t.Spec.ServicesMetadata != nil {
        if dst.Spec.ServiceOptions == nil {
            dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{
                AdditionalMetadata: &capsulev1beta1.AdditionalMetadataSpec{
                    AdditionalLabels: t.Spec.ServicesMetadata.AdditionalLabels,
                    AdditionalAnnotations: t.Spec.ServicesMetadata.AdditionalAnnotations,
                    Labels: t.Spec.ServicesMetadata.AdditionalLabels,
                    Annotations: t.Spec.ServicesMetadata.AdditionalAnnotations,
                },
            }
        }
@@ -159,14 +173,22 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
            Regex: t.Spec.StorageClasses.Regex,
        }
    }
    if v, ok := t.Annotations[ingressHostnameCollisionScope]; ok {
        switch v {
        case string(capsulev1beta1.HostnameCollisionScopeCluster), string(capsulev1beta1.HostnameCollisionScopeTenant), string(capsulev1beta1.HostnameCollisionScopeNamespace):
            dst.Spec.IngressOptions.HostnameCollisionScope = capsulev1beta1.HostnameCollisionScope(v)
        default:
            dst.Spec.IngressOptions.HostnameCollisionScope = capsulev1beta1.HostnameCollisionScopeDisabled
        }
    }
    if t.Spec.IngressClasses != nil {
        dst.Spec.IngressClasses = &capsulev1beta1.AllowedListSpec{
        dst.Spec.IngressOptions.AllowedClasses = &capsulev1beta1.AllowedListSpec{
            Exact: t.Spec.IngressClasses.Exact,
            Regex: t.Spec.IngressClasses.Regex,
        }
    }
    if t.Spec.IngressHostnames != nil {
        dst.Spec.IngressHostnames = &capsulev1beta1.AllowedListSpec{
        dst.Spec.IngressOptions.AllowedHostnames = &capsulev1beta1.AllowedListSpec{
            Exact: t.Spec.IngressHostnames.Exact,
            Regex: t.Spec.IngressHostnames.Regex,
        }
@@ -189,6 +211,17 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
    }
    if len(t.Spec.ResourceQuota) > 0 {
        dst.Spec.ResourceQuota = &capsulev1beta1.ResourceQuotaSpec{
            Scope: func() capsulev1beta1.ResourceQuotaScope {
                if v, ok := t.GetAnnotations()[resourceQuotaScopeAnnotation]; ok {
                    switch v {
                    case string(capsulev1beta1.ResourceQuotaScopeNamespace):
                        return capsulev1beta1.ResourceQuotaScopeNamespace
                    case string(capsulev1beta1.ResourceQuotaScopeTenant):
                        return capsulev1beta1.ResourceQuotaScopeTenant
                    }
                }
                return capsulev1beta1.ResourceQuotaScopeTenant
            }(),
            Items: t.Spec.ResourceQuota,
        }
    }
@@ -201,12 +234,15 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
        }
    }
    if t.Spec.ExternalServiceIPs != nil {
        dst.Spec.ExternalServiceIPs = &capsulev1beta1.ExternalServiceIPsSpec{
        if dst.Spec.ServiceOptions == nil {
            dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{}
        }
        dst.Spec.ServiceOptions.ExternalServiceIPs = &capsulev1beta1.ExternalServiceIPsSpec{
            Allowed: make([]capsulev1beta1.AllowedIP, len(t.Spec.ExternalServiceIPs.Allowed)),
        }

        for i, IP := range t.Spec.ExternalServiceIPs.Allowed {
            dst.Spec.ExternalServiceIPs.Allowed[i] = capsulev1beta1.AllowedIP(IP)
            dst.Spec.ServiceOptions.ExternalServiceIPs.Allowed[i] = capsulev1beta1.AllowedIP(IP)
        }
    }

@@ -262,6 +298,21 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
        dst.Spec.ServiceOptions.AllowedServices.ExternalName = pointer.BoolPtr(val)
    }

    loadBalancerService, ok := annotations[enableLoadBalancerAnnotation]
    if ok {
        val, err := strconv.ParseBool(loadBalancerService)
        if err != nil {
            return errors.Wrap(err, fmt.Sprintf("unable to parse %s annotation on tenant %s", enableLoadBalancerAnnotation, t.GetName()))
        }
        if dst.Spec.ServiceOptions == nil {
            dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{}
        }
        if dst.Spec.ServiceOptions.AllowedServices == nil {
            dst.Spec.ServiceOptions.AllowedServices = &capsulev1beta1.AllowedServices{}
        }
        dst.Spec.ServiceOptions.AllowedServices.LoadBalancer = pointer.BoolPtr(val)
    }

    // Status
    dst.Status = capsulev1beta1.TenantStatus{
        Size: t.Status.Size,
@@ -274,6 +325,7 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
    delete(dst.ObjectMeta.Annotations, podPriorityAllowedRegexAnnotation)
    delete(dst.ObjectMeta.Annotations, enableNodePortsAnnotation)
    delete(dst.ObjectMeta.Annotations, enableExternalNameAnnotation)
    delete(dst.ObjectMeta.Annotations, enableLoadBalancerAnnotation)
    delete(dst.ObjectMeta.Annotations, ownerGroupsAnnotation)
    delete(dst.ObjectMeta.Annotations, ownerUsersAnnotation)
    delete(dst.ObjectMeta.Annotations, ownerServiceAccountAnnotation)
@@ -289,6 +341,8 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
    delete(dst.ObjectMeta.Annotations, enablePriorityClassListingAnnotation)
    delete(dst.ObjectMeta.Annotations, enablePriorityClassUpdateAnnotation)
    delete(dst.ObjectMeta.Annotations, enablePriorityClassDeletionAnnotation)
    delete(dst.ObjectMeta.Annotations, resourceQuotaScopeAnnotation)
    delete(dst.ObjectMeta.Annotations, ingressHostnameCollisionScope)

    return nil
}
@@ -397,7 +451,10 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
    t.ObjectMeta = src.ObjectMeta

    // Spec
    t.Spec.NamespaceQuota = src.Spec.NamespaceQuota
    if src.Spec.NamespaceOptions != nil && src.Spec.NamespaceOptions.Quota != nil {
        t.Spec.NamespaceQuota = src.Spec.NamespaceOptions.Quota
    }

    t.Spec.NodeSelector = src.Spec.NodeSelector

    if t.Annotations == nil {
@@ -406,16 +463,16 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {

    t.convertV1Beta1OwnerToV1Alpha1(src)

    if src.Spec.NamespacesMetadata != nil {
    if src.Spec.NamespaceOptions != nil && src.Spec.NamespaceOptions.AdditionalMetadata != nil {
        t.Spec.NamespacesMetadata = &AdditionalMetadataSpec{
            AdditionalLabels: src.Spec.NamespacesMetadata.AdditionalLabels,
            AdditionalAnnotations: src.Spec.NamespacesMetadata.AdditionalAnnotations,
            AdditionalLabels: src.Spec.NamespaceOptions.AdditionalMetadata.Labels,
            AdditionalAnnotations: src.Spec.NamespaceOptions.AdditionalMetadata.Annotations,
        }
    }
    if src.Spec.ServiceOptions != nil && src.Spec.ServiceOptions.AdditionalMetadata != nil {
        t.Spec.ServicesMetadata = &AdditionalMetadataSpec{
            AdditionalLabels: src.Spec.ServiceOptions.AdditionalMetadata.AdditionalLabels,
            AdditionalAnnotations: src.Spec.ServiceOptions.AdditionalMetadata.AdditionalAnnotations,
            AdditionalLabels: src.Spec.ServiceOptions.AdditionalMetadata.Labels,
            AdditionalAnnotations: src.Spec.ServiceOptions.AdditionalMetadata.Annotations,
        }
    }
    if src.Spec.StorageClasses != nil {
@@ -424,16 +481,17 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
            Regex: src.Spec.StorageClasses.Regex,
        }
    }
    if src.Spec.IngressClasses != nil {
    t.Annotations[ingressHostnameCollisionScope] = string(src.Spec.IngressOptions.HostnameCollisionScope)
    if src.Spec.IngressOptions.AllowedClasses != nil {
        t.Spec.IngressClasses = &AllowedListSpec{
            Exact: src.Spec.IngressClasses.Exact,
            Regex: src.Spec.IngressClasses.Regex,
            Exact: src.Spec.IngressOptions.AllowedClasses.Exact,
            Regex: src.Spec.IngressOptions.AllowedClasses.Regex,
        }
    }
    if src.Spec.IngressHostnames != nil {
    if src.Spec.IngressOptions.AllowedHostnames != nil {
        t.Spec.IngressHostnames = &AllowedListSpec{
            Exact: src.Spec.IngressHostnames.Exact,
            Regex: src.Spec.IngressHostnames.Regex,
            Exact: src.Spec.IngressOptions.AllowedHostnames.Exact,
            Regex: src.Spec.IngressOptions.AllowedHostnames.Regex,
        }
    }
    if src.Spec.ContainerRegistries != nil {
@@ -449,6 +507,7 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
        t.Spec.LimitRanges = src.Spec.LimitRanges.Items
    }
    if src.Spec.ResourceQuota != nil {
        t.Annotations[resourceQuotaScopeAnnotation] = string(src.Spec.ResourceQuota.Scope)
        t.Spec.ResourceQuota = src.Spec.ResourceQuota.Items
    }
    if len(src.Spec.AdditionalRoleBindings) > 0 {
@@ -459,12 +518,12 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
            })
        }
    }
    if src.Spec.ExternalServiceIPs != nil {
    if src.Spec.ServiceOptions != nil && src.Spec.ServiceOptions.ExternalServiceIPs != nil {
        t.Spec.ExternalServiceIPs = &ExternalServiceIPsSpec{
            Allowed: make([]AllowedIP, len(src.Spec.ExternalServiceIPs.Allowed)),
            Allowed: make([]AllowedIP, len(src.Spec.ServiceOptions.ExternalServiceIPs.Allowed)),
        }

        for i, IP := range src.Spec.ExternalServiceIPs.Allowed {
        for i, IP := range src.Spec.ServiceOptions.ExternalServiceIPs.Allowed {
            t.Spec.ExternalServiceIPs.Allowed[i] = AllowedIP(IP)
        }
    }
@@ -488,6 +547,7 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
    if src.Spec.ServiceOptions != nil && src.Spec.ServiceOptions.AllowedServices != nil {
        t.Annotations[enableNodePortsAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.NodePort)
        t.Annotations[enableExternalNameAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.ExternalName)
        t.Annotations[enableLoadBalancerAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.LoadBalancer)
    }

    // Status
@@ -36,18 +36,26 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
        Regex: "^foo*",
    }
    var v1beta1AdditionalMetadataSpec = &capsulev1beta1.AdditionalMetadataSpec{
        AdditionalLabels: map[string]string{
        Labels: map[string]string{
            "foo": "bar",
        },
        AdditionalAnnotations: map[string]string{
        Annotations: map[string]string{
            "foo": "bar",
        },
    }
    var v1beta1NamespaceOptions = &capsulev1beta1.NamespaceOptions{
        Quota: &namespaceQuota,
        AdditionalMetadata: v1beta1AdditionalMetadataSpec,
    }
    var v1beta1ServiceOptions = &capsulev1beta1.ServiceOptions{
        AdditionalMetadata: v1beta1AdditionalMetadataSpec,
        AllowedServices: &capsulev1beta1.AllowedServices{
            NodePort: pointer.BoolPtr(false),
            ExternalName: pointer.BoolPtr(false),
            LoadBalancer: pointer.BoolPtr(false),
        },
        ExternalServiceIPs: &capsulev1beta1.ExternalServiceIPsSpec{
            Allowed: []capsulev1beta1.AllowedIP{"192.168.0.1"},
        },
    }
    var v1beta1AllowedListSpec = &capsulev1beta1.AllowedListSpec{
@@ -222,12 +230,14 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
            },
        },
    },
    NamespaceQuota: &namespaceQuota,
    NamespacesMetadata: v1beta1AdditionalMetadataSpec,
    ServiceOptions: v1beta1ServiceOptions,
    StorageClasses: v1beta1AllowedListSpec,
    IngressClasses: v1beta1AllowedListSpec,
    IngressHostnames: v1beta1AllowedListSpec,
    NamespaceOptions: v1beta1NamespaceOptions,
    ServiceOptions: v1beta1ServiceOptions,
    StorageClasses: v1beta1AllowedListSpec,
    IngressOptions: capsulev1beta1.IngressOptions{
        HostnameCollisionScope: capsulev1beta1.HostnameCollisionScopeDisabled,
        AllowedClasses: v1beta1AllowedListSpec,
        AllowedHostnames: v1beta1AllowedListSpec,
    },
    ContainerRegistries: v1beta1AllowedListSpec,
    NodeSelector: nodeSelector,
    NetworkPolicies: &capsulev1beta1.NetworkPolicySpec{
@@ -237,6 +247,7 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
        Items: limitRanges,
    },
    ResourceQuota: &capsulev1beta1.ResourceQuotaSpec{
        Scope: capsulev1beta1.ResourceQuotaScopeNamespace,
        Items: resourceQuotas,
    },
    AdditionalRoleBindings: []capsulev1beta1.AdditionalRoleBindingsSpec{
@@ -251,9 +262,6 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
            },
        },
    },
    ExternalServiceIPs: &capsulev1beta1.ExternalServiceIPsSpec{
        Allowed: []capsulev1beta1.AllowedIP{"192.168.0.1"},
    },
    ImagePullPolicies: []capsulev1beta1.ImagePullPolicySpec{"Always", "IfNotPresent"},
    PriorityClasses: &capsulev1beta1.AllowedListSpec{
        Exact: []string{"default"},
@@ -278,6 +286,7 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
    podAllowedImagePullPolicyAnnotation: "Always,IfNotPresent",
    enableExternalNameAnnotation: "false",
    enableNodePortsAnnotation: "false",
    enableLoadBalancerAnnotation: "false",
    podPriorityAllowedAnnotation: "default",
    podPriorityAllowedRegexAnnotation: "^tier-.*$",
    ownerGroupsAnnotation: "owner-foo,owner-bar",
@@ -292,6 +301,8 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
    enableIngressClassUpdateAnnotation: "alice,bob",
    enableIngressClassDeletionAnnotation: "alice,jack",
    enablePriorityClassListingAnnotation: "jack",
    resourceQuotaScopeAnnotation: "Namespace",
    ingressHostnameCollisionScope: "Disabled",
    },
},
Spec: TenantSpec{
@@ -4,6 +4,6 @@
package v1beta1

type AdditionalMetadataSpec struct {
    AdditionalLabels map[string]string `json:"additionalLabels,omitempty"`
    AdditionalAnnotations map[string]string `json:"additionalAnnotations,omitempty"`
    Labels map[string]string `json:"labels,omitempty"`
    Annotations map[string]string `json:"annotations,omitempty"`
}
@@ -1,6 +1,6 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

//nolint:dupl
package v1beta1

import (

@@ -1,6 +1,6 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

//nolint:dupl
package v1beta1

import (
api/v1beta1/deny_wildcard.go (new file, 15 lines)

@@ -0,0 +1,15 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1beta1

const (
    denyWildcard = "capsule.clastix.io/deny-wildcard"
)

func (t *Tenant) IsWildcardDenied() bool {
    if v, ok := t.Annotations[denyWildcard]; ok && v == "true" {
        return true
    }
    return false
}
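As a usage sketch (not part of this diff), the new `IsWildcardDenied` helper simply reads the `capsule.clastix.io/deny-wildcard` annotation off the Tenant. The snippet below assumes the module path `github.com/clastix/capsule/api/v1beta1`; the tenant name and annotation value are illustrative.

```go
package main

import (
	"fmt"

	capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Tenant carrying the deny-wildcard annotation introduced by this release.
	tnt := capsulev1beta1.Tenant{
		ObjectMeta: metav1.ObjectMeta{
			Name: "gas",
			Annotations: map[string]string{
				"capsule.clastix.io/deny-wildcard": "true",
			},
		},
	}
	// Prints true: wildcard Ingress hostnames would be rejected for this Tenant.
	fmt.Println(tnt.IsWildcardDenied())
}
```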
api/v1beta1/forbidden_list.go (new file, 33 lines)

@@ -0,0 +1,33 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
//nolint:dupl
package v1beta1

import (
    "regexp"
    "sort"
    "strings"
)

type ForbiddenListSpec struct {
    Exact []string `json:"denied,omitempty"`
    Regex string `json:"deniedRegex,omitempty"`
}

func (in *ForbiddenListSpec) ExactMatch(value string) (ok bool) {
    if len(in.Exact) > 0 {
        sort.SliceStable(in.Exact, func(i, j int) bool {
            return strings.ToLower(in.Exact[i]) < strings.ToLower(in.Exact[j])
        })
        i := sort.SearchStrings(in.Exact, value)
        ok = i < len(in.Exact) && in.Exact[i] == value
    }
    return
}

func (in ForbiddenListSpec) RegexMatch(value string) (ok bool) {
    if len(in.Regex) > 0 {
        ok = regexp.MustCompile(in.Regex).MatchString(value)
    }
    return
}
api/v1beta1/forbidden_list_test.go (new file, 67 lines)

@@ -0,0 +1,67 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
//nolint:dupl
package v1beta1

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestForbiddenListSpec_ExactMatch(t *testing.T) {
    type tc struct {
        In []string
        True []string
        False []string
    }
    for _, tc := range []tc{
        {
            []string{"foo", "bar", "bizz", "buzz"},
            []string{"foo", "bar", "bizz", "buzz"},
            []string{"bing", "bong"},
        },
        {
            []string{"one", "two", "three"},
            []string{"one", "two", "three"},
            []string{"a", "b", "c"},
        },
        {
            nil,
            nil,
            []string{"any", "value"},
        },
    } {
        a := ForbiddenListSpec{
            Exact: tc.In,
        }
        for _, ok := range tc.True {
            assert.True(t, a.ExactMatch(ok))
        }
        for _, ko := range tc.False {
            assert.False(t, a.ExactMatch(ko))
        }
    }
}

func TestForbiddenListSpec_RegexMatch(t *testing.T) {
    type tc struct {
        Regex string
        True []string
        False []string
    }
    for _, tc := range []tc{
        {`first-\w+-pattern`, []string{"first-date-pattern", "first-year-pattern"}, []string{"broken", "first-year", "second-date-pattern"}},
        {``, nil, []string{"any", "value"}},
    } {
        a := ForbiddenListSpec{
            Regex: tc.Regex,
        }
        for _, ok := range tc.True {
            assert.True(t, a.RegexMatch(ok))
        }
        for _, ko := range tc.False {
            assert.False(t, a.RegexMatch(ko))
        }
    }
}
api/v1beta1/hostname_collision_scope.go (new file, 14 lines)

@@ -0,0 +1,14 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1beta1

const (
    HostnameCollisionScopeCluster   HostnameCollisionScope = "Cluster"
    HostnameCollisionScopeTenant    HostnameCollisionScope = "Tenant"
    HostnameCollisionScopeNamespace HostnameCollisionScope = "Namespace"
    HostnameCollisionScopeDisabled  HostnameCollisionScope = "Disabled"
)

// +kubebuilder:validation:Enum=Cluster;Tenant;Namespace;Disabled
type HostnameCollisionScope string
api/v1beta1/ingress_options.go (new file, 24 lines)

@@ -0,0 +1,24 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1beta1

type IngressOptions struct {
    // Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
    AllowedClasses *AllowedListSpec `json:"allowedClasses,omitempty"`
    // Defines the scope of hostname collision check performed when Tenant Owners create Ingress with allowed hostnames.
    //
    // - Cluster: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces managed by Capsule.
    //
    // - Tenant: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces of the Tenant.
    //
    // - Namespace: disallow the creation of an Ingress if the pair hostname and path is already used in the Ingress Namespace.
    //
    // Optional.
    // +kubebuilder:default=Disabled
    HostnameCollisionScope HostnameCollisionScope `json:"hostnameCollisionScope,omitempty"`
    // Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
    AllowedHostnames *AllowedListSpec `json:"allowedHostnames,omitempty"`
}
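A hedged sketch (not from the diff) of populating the new `IngressOptions` block in Go; the class names, regexes, and hostname pattern are illustrative only, and the module path is assumed to be `github.com/clastix/capsule/api/v1beta1`.

```go
package main

import (
	"fmt"

	capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

func main() {
	// Ingress-related constraints now live under a single IngressOptions block.
	opts := capsulev1beta1.IngressOptions{
		// Only these IngressClasses may be referenced by Ingresses in the Tenant.
		AllowedClasses: &capsulev1beta1.AllowedListSpec{
			Exact: []string{"nginx"},
			Regex: "^tenant-.*$",
		},
		// Reject an Ingress whose hostname/path pair is already used within the Tenant.
		HostnameCollisionScope: capsulev1beta1.HostnameCollisionScopeTenant,
		// Only hostnames matching this pattern are accepted.
		AllowedHostnames: &capsulev1beta1.AllowedListSpec{
			Regex: `.*\.clastix\.io$`,
		},
	}
	fmt.Printf("%+v\n", opts)
}
```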
api/v1beta1/namespace_options.go (new file, 51 lines)

@@ -0,0 +1,51 @@
package v1beta1

import "strings"

type NamespaceOptions struct {
    //+kubebuilder:validation:Minimum=1
    // Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
    Quota *int32 `json:"quota,omitempty"`
    // Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
    AdditionalMetadata *AdditionalMetadataSpec `json:"additionalMetadata,omitempty"`
}

func (t *Tenant) hasForbiddenNamespaceLabelsAnnotations() bool {
    if _, ok := t.Annotations[ForbiddenNamespaceLabelsAnnotation]; ok {
        return true
    }
    if _, ok := t.Annotations[ForbiddenNamespaceLabelsRegexpAnnotation]; ok {
        return true
    }
    return false
}

func (t *Tenant) hasForbiddenNamespaceAnnotationsAnnotations() bool {
    if _, ok := t.Annotations[ForbiddenNamespaceAnnotationsAnnotation]; ok {
        return true
    }
    if _, ok := t.Annotations[ForbiddenNamespaceAnnotationsRegexpAnnotation]; ok {
        return true
    }
    return false
}

func (t *Tenant) ForbiddenUserNamespaceLabels() *ForbiddenListSpec {
    if !t.hasForbiddenNamespaceLabelsAnnotations() {
        return nil
    }
    return &ForbiddenListSpec{
        Exact: strings.Split(t.Annotations[ForbiddenNamespaceLabelsAnnotation], ","),
        Regex: t.Annotations[ForbiddenNamespaceLabelsRegexpAnnotation],
    }
}

func (t *Tenant) ForbiddenUserNamespaceAnnotations() *ForbiddenListSpec {
    if !t.hasForbiddenNamespaceAnnotationsAnnotations() {
        return nil
    }
    return &ForbiddenListSpec{
        Exact: strings.Split(t.Annotations[ForbiddenNamespaceAnnotationsAnnotation], ","),
        Regex: t.Annotations[ForbiddenNamespaceAnnotationsRegexpAnnotation],
    }
}
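Again as illustration only (assumed module path, hypothetical annotation values): the forbidden-namespace-labels annotations surface through `ForbiddenUserNamespaceLabels` as a `ForbiddenListSpec` that can then be matched exactly or by regexp.

```go
package main

import (
	"fmt"

	capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Tenant annotated with the forbidden-namespace-labels keys defined above.
	tnt := capsulev1beta1.Tenant{
		ObjectMeta: metav1.ObjectMeta{
			Name: "gas",
			Annotations: map[string]string{
				capsulev1beta1.ForbiddenNamespaceLabelsAnnotation:       "foo,bar",
				capsulev1beta1.ForbiddenNamespaceLabelsRegexpAnnotation: "^gas-.*$",
			},
		},
	}
	forbidden := tnt.ForbiddenUserNamespaceLabels()
	// true: "foo" is in the exact deny list.
	fmt.Println(forbidden.ExactMatch("foo"))
	// true: "gas-region" matches the deny regexp.
	fmt.Println(forbidden.RegexMatch("gas-region"))
}
```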
@@ -5,6 +5,17 @@ package v1beta1

import corev1 "k8s.io/api/core/v1"

// +kubebuilder:validation:Enum=Tenant;Namespace
type ResourceQuotaScope string

const (
    ResourceQuotaScopeTenant    ResourceQuotaScope = "Tenant"
    ResourceQuotaScopeNamespace ResourceQuotaScope = "Namespace"
)

type ResourceQuotaSpec struct {
    // +kubebuilder:default=Tenant
    // Define if the Resource Budget should compute resource across all Namespaces in the Tenant or individually per cluster. Default is Tenant
    Scope ResourceQuotaScope `json:"scope,omitempty"`
    Items []corev1.ResourceQuotaSpec `json:"items,omitempty"`
}
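For orientation, a minimal sketch (not part of the diff) of a `ResourceQuotaSpec` using the new `Namespace` scope; the quota figures and the module path are assumptions for the example.

```go
package main

import (
	"fmt"

	capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Per-namespace accounting: each Namespace in the Tenant gets its own budget,
	// instead of the default Tenant-wide aggregation.
	rq := capsulev1beta1.ResourceQuotaSpec{
		Scope: capsulev1beta1.ResourceQuotaScopeNamespace,
		Items: []corev1.ResourceQuotaSpec{
			{
				Hard: corev1.ResourceList{
					corev1.ResourceLimitsCPU:    resource.MustParse("2"),
					corev1.ResourceLimitsMemory: resource.MustParse("2Gi"),
				},
			},
		},
	}
	fmt.Printf("%+v\n", rq)
}
```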
api/v1beta1/service_allowed_types.go (new file, 16 lines)

@@ -0,0 +1,16 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1beta1

type AllowedServices struct {
    //+kubebuilder:default=true
    // Specifies if NodePort service type resources are allowed for the Tenant. Default is true. Optional.
    NodePort *bool `json:"nodePort,omitempty"`
    //+kubebuilder:default=true
    // Specifies if ExternalName service type resources are allowed for the Tenant. Default is true. Optional.
    ExternalName *bool `json:"externalName,omitempty"`
    //+kubebuilder:default=true
    // Specifies if LoadBalancer service type resources are allowed for the Tenant. Default is true. Optional.
    LoadBalancer *bool `json:"loadBalancer,omitempty"`
}
@@ -8,13 +8,6 @@ type ServiceOptions struct {
    AdditionalMetadata *AdditionalMetadataSpec `json:"additionalMetadata,omitempty"`
    // Block or deny certain type of Services. Optional.
    AllowedServices *AllowedServices `json:"allowedServices,omitempty"`
}

type AllowedServices struct {
    //+kubebuilder:default=true
    // Specifies if NodePort service type resources are allowed for the Tenant. Default is true. Optional.
    NodePort *bool `json:"nodePort,omitempty"`
    //+kubebuilder:default=true
    // Specifies if ExternalName service type resources are allowed for the Tenant. Default is true. Optional.
    ExternalName *bool `json:"externalName,omitempty"`
    // Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means no IPs are allowed. Optional.
    ExternalServiceIPs *ExternalServiceIPsSpec `json:"externalIPs,omitempty"`
}
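A short sketch of the regrouped `ServiceOptions`, mirroring the values used in the conversion test above (module path assumed, addresses illustrative): the allowed Service types plus the external IPs usable by ClusterIP Services.

```go
package main

import (
	"fmt"

	capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
	"k8s.io/utils/pointer"
)

func main() {
	// Service-related constraints grouped under ServiceOptions.
	opts := capsulev1beta1.ServiceOptions{
		AllowedServices: &capsulev1beta1.AllowedServices{
			NodePort:     pointer.BoolPtr(false),
			ExternalName: pointer.BoolPtr(false),
			LoadBalancer: pointer.BoolPtr(true),
		},
		ExternalServiceIPs: &capsulev1beta1.ExternalServiceIPsSpec{
			Allowed: []capsulev1beta1.AllowedIP{"192.168.0.1"},
		},
	}
	fmt.Printf("%+v\n", opts)
}
```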
@@ -8,12 +8,16 @@ import (
)

const (
    AvailableIngressClassesAnnotation = "capsule.clastix.io/ingress-classes"
    AvailableIngressClassesRegexpAnnotation = "capsule.clastix.io/ingress-classes-regexp"
    AvailableStorageClassesAnnotation = "capsule.clastix.io/storage-classes"
    AvailableStorageClassesRegexpAnnotation = "capsule.clastix.io/storage-classes-regexp"
    AllowedRegistriesAnnotation = "capsule.clastix.io/allowed-registries"
    AllowedRegistriesRegexpAnnotation = "capsule.clastix.io/allowed-registries-regexp"
    AvailableIngressClassesAnnotation             = "capsule.clastix.io/ingress-classes"
    AvailableIngressClassesRegexpAnnotation       = "capsule.clastix.io/ingress-classes-regexp"
    AvailableStorageClassesAnnotation             = "capsule.clastix.io/storage-classes"
    AvailableStorageClassesRegexpAnnotation       = "capsule.clastix.io/storage-classes-regexp"
    AllowedRegistriesAnnotation                   = "capsule.clastix.io/allowed-registries"
    AllowedRegistriesRegexpAnnotation             = "capsule.clastix.io/allowed-registries-regexp"
    ForbiddenNamespaceLabelsAnnotation            = "capsule.clastix.io/forbidden-namespace-labels"
    ForbiddenNamespaceLabelsRegexpAnnotation      = "capsule.clastix.io/forbidden-namespace-labels-regexp"
    ForbiddenNamespaceAnnotationsAnnotation       = "capsule.clastix.io/forbidden-namespace-annotations"
    ForbiddenNamespaceAnnotationsRegexpAnnotation = "capsule.clastix.io/forbidden-namespace-annotations-regexp"
)

func UsedQuotaFor(resource fmt.Stringer) string {
@@ -18,10 +18,10 @@ func (t *Tenant) IsCordoned() bool {

func (t *Tenant) IsFull() bool {
    // we don't have limits on assigned Namespaces
    if t.Spec.NamespaceQuota == nil {
    if t.Spec.NamespaceOptions == nil || t.Spec.NamespaceOptions.Quota == nil {
        return false
    }
    return len(t.Status.Namespaces) >= int(*t.Spec.NamespaceQuota)
    return len(t.Status.Namespaces) >= int(*t.Spec.NamespaceOptions.Quota)
}

func (t *Tenant) AssignNamespaces(namespaces []corev1.Namespace) {
@@ -3,18 +3,18 @@

package v1beta1

// +kubebuilder:validation:Enum=cordoned;active
// +kubebuilder:validation:Enum=Cordoned;Active
type tenantState string

const (
    TenantStateActive   tenantState = "active"
    TenantStateCordoned tenantState = "cordoned"
    TenantStateActive   tenantState = "Active"
    TenantStateCordoned tenantState = "Cordoned"
)

// Returns the observed state of the Tenant
type TenantStatus struct {
    //+kubebuilder:default=active
    // The operational state of the Tenant. Possible values are "active", "cordoned".
    //+kubebuilder:default=Active
    // The operational state of the Tenant. Possible values are "Active", "Cordoned".
    State tenantState `json:"state"`
    // How many namespaces are assigned to the Tenant.
    Size uint `json:"size"`
@@ -11,20 +11,14 @@ import (
type TenantSpec struct {
    // Specifies the owners of the Tenant. Mandatory.
    Owners OwnerListSpec `json:"owners"`

    //+kubebuilder:validation:Minimum=1
    // Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
    NamespaceQuota *int32 `json:"namespaceQuota,omitempty"`
    // Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
    NamespacesMetadata *AdditionalMetadataSpec `json:"namespacesMetadata,omitempty"`
    // Specifies options for the Namespaces, such as additional metadata or maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
    NamespaceOptions *NamespaceOptions `json:"namespaceOptions,omitempty"`
    // Specifies options for the Service, such as additional metadata or block of certain type of Services. Optional.
    ServiceOptions *ServiceOptions `json:"serviceOptions,omitempty"`
    // Specifies the allowed StorageClasses assigned to the Tenant. Capsule assures that all PersistentVolumeClaim resources created in the Tenant can use only one of the allowed StorageClasses. Optional.
    StorageClasses *AllowedListSpec `json:"storageClasses,omitempty"`
    // Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
    IngressClasses *AllowedListSpec `json:"ingressClasses,omitempty"`
    // Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
    IngressHostnames *AllowedListSpec `json:"ingressHostnames,omitempty"`
    // Specifies options for the Ingress resources, such as allowed hostnames and IngressClass. Optional.
    IngressOptions IngressOptions `json:"ingressOptions,omitempty"`
    // Specifies the trusted Image Registries assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed trusted registries. Optional.
    ContainerRegistries *AllowedListSpec `json:"containerRegistries,omitempty"`
    // Specifies the label to control the placement of pods on a given pool of worker nodes. All namesapces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
@@ -37,11 +31,9 @@ type TenantSpec struct {
    ResourceQuota *ResourceQuotaSpec `json:"resourceQuotas,omitempty"`
    // Specifies additional RoleBindings assigned to the Tenant. Capsule will ensure that all namespaces in the Tenant always contain the RoleBinding for the given ClusterRole. Optional.
    AdditionalRoleBindings []AdditionalRoleBindingsSpec `json:"additionalRoleBindings,omitempty"`
    // Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means all the IPs are allowed. Optional.
    ExternalServiceIPs *ExternalServiceIPsSpec `json:"externalServiceIPs,omitempty"`
    // Specify the allowed values for the imagePullPolicies option in Pod resources. Capsule assures that all Pod resources created in the Tenant can use only one of the allowed policy. Optional.
    ImagePullPolicies []ImagePullPolicySpec `json:"imagePullPolicies,omitempty"`
    // Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
    // Specifies the allowed priorityClasses assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed PriorityClasses. Optional.
    PriorityClasses *AllowedListSpec `json:"priorityClasses,omitempty"`
}

@@ -50,7 +42,7 @@ type TenantSpec struct {
//+kubebuilder:storageversion
// +kubebuilder:resource:scope=Cluster,shortName=tnt
// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state",description="The actual state of the Tenant"
// +kubebuilder:printcolumn:name="Namespace quota",type="integer",JSONPath=".spec.namespaceQuota",description="The max amount of Namespaces can be created"
// +kubebuilder:printcolumn:name="Namespace quota",type="integer",JSONPath=".spec.namespaceOptions.quota",description="The max amount of Namespaces can be created"
// +kubebuilder:printcolumn:name="Namespace count",type="integer",JSONPath=".status.size",description="The total amount of Namespaces in use"
// +kubebuilder:printcolumn:name="Node selector",type="string",JSONPath=".spec.nodeSelector",description="Node Selector applied to Pods"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age"
@@ -17,15 +17,15 @@ import (
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdditionalMetadataSpec) DeepCopyInto(out *AdditionalMetadataSpec) {
    *out = *in
    if in.AdditionalLabels != nil {
        in, out := &in.AdditionalLabels, &out.AdditionalLabels
    if in.Labels != nil {
        in, out := &in.Labels, &out.Labels
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.AdditionalAnnotations != nil {
        in, out := &in.AdditionalAnnotations, &out.AdditionalAnnotations
    if in.Annotations != nil {
        in, out := &in.Annotations, &out.Annotations
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
@@ -96,6 +96,11 @@ func (in *AllowedServices) DeepCopyInto(out *AllowedServices) {
        *out = new(bool)
        **out = **in
    }
    if in.LoadBalancer != nil {
        in, out := &in.LoadBalancer, &out.LoadBalancer
        *out = new(bool)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedServices.
@@ -149,6 +154,51 @@ func (in *ExternalServiceIPsSpec) DeepCopy() *ExternalServiceIPsSpec {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ForbiddenListSpec) DeepCopyInto(out *ForbiddenListSpec) {
    *out = *in
    if in.Exact != nil {
        in, out := &in.Exact, &out.Exact
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForbiddenListSpec.
func (in *ForbiddenListSpec) DeepCopy() *ForbiddenListSpec {
    if in == nil {
        return nil
    }
    out := new(ForbiddenListSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressOptions) DeepCopyInto(out *IngressOptions) {
    *out = *in
    if in.AllowedClasses != nil {
        in, out := &in.AllowedClasses, &out.AllowedClasses
        *out = new(AllowedListSpec)
        (*in).DeepCopyInto(*out)
    }
    if in.AllowedHostnames != nil {
        in, out := &in.AllowedHostnames, &out.AllowedHostnames
        *out = new(AllowedListSpec)
        (*in).DeepCopyInto(*out)
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressOptions.
func (in *IngressOptions) DeepCopy() *IngressOptions {
    if in == nil {
        return nil
    }
    out := new(IngressOptions)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LimitRangesSpec) DeepCopyInto(out *LimitRangesSpec) {
    *out = *in
@@ -171,6 +221,31 @@ func (in *LimitRangesSpec) DeepCopy() *LimitRangesSpec {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceOptions) DeepCopyInto(out *NamespaceOptions) {
    *out = *in
    if in.Quota != nil {
        in, out := &in.Quota, &out.Quota
        *out = new(int32)
        **out = **in
    }
    if in.AdditionalMetadata != nil {
        in, out := &in.AdditionalMetadata, &out.AdditionalMetadata
        *out = new(AdditionalMetadataSpec)
        (*in).DeepCopyInto(*out)
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceOptions.
func (in *NamespaceOptions) DeepCopy() *NamespaceOptions {
    if in == nil {
        return nil
    }
    out := new(NamespaceOptions)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) {
    *out = *in
@@ -291,6 +366,11 @@ func (in *ServiceOptions) DeepCopyInto(out *ServiceOptions) {
        *out = new(AllowedServices)
        (*in).DeepCopyInto(*out)
    }
    if in.ExternalServiceIPs != nil {
        in, out := &in.ExternalServiceIPs, &out.ExternalServiceIPs
        *out = new(ExternalServiceIPsSpec)
        (*in).DeepCopyInto(*out)
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceOptions.
@@ -372,14 +452,9 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    if in.NamespaceQuota != nil {
        in, out := &in.NamespaceQuota, &out.NamespaceQuota
        *out = new(int32)
        **out = **in
    }
    if in.NamespacesMetadata != nil {
        in, out := &in.NamespacesMetadata, &out.NamespacesMetadata
        *out = new(AdditionalMetadataSpec)
    if in.NamespaceOptions != nil {
        in, out := &in.NamespaceOptions, &out.NamespaceOptions
        *out = new(NamespaceOptions)
        (*in).DeepCopyInto(*out)
    }
    if in.ServiceOptions != nil {
@@ -392,16 +467,7 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
        *out = new(AllowedListSpec)
        (*in).DeepCopyInto(*out)
    }
    if in.IngressClasses != nil {
        in, out := &in.IngressClasses, &out.IngressClasses
        *out = new(AllowedListSpec)
        (*in).DeepCopyInto(*out)
    }
    if in.IngressHostnames != nil {
        in, out := &in.IngressHostnames, &out.IngressHostnames
        *out = new(AllowedListSpec)
        (*in).DeepCopyInto(*out)
    }
    in.IngressOptions.DeepCopyInto(&out.IngressOptions)
    if in.ContainerRegistries != nil {
        in, out := &in.ContainerRegistries, &out.ContainerRegistries
        *out = new(AllowedListSpec)
@@ -436,11 +502,6 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    if in.ExternalServiceIPs != nil {
        in, out := &in.ExternalServiceIPs, &out.ExternalServiceIPs
        *out = new(ExternalServiceIPsSpec)
        (*in).DeepCopyInto(*out)
    }
    if in.ImagePullPolicies != nil {
        in, out := &in.ImagePullPolicies, &out.ImagePullPolicies
        *out = make([]ImagePullPolicySpec, len(*in))
@@ -21,8 +21,8 @@ sources:

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.0.19
version: 0.1.1

# This is the version number of the application being deployed.
# This version number should be incremented each time you make changes to the application.
appVersion: 0.0.5
appVersion: 0.1.0

@@ -1,6 +1,6 @@
# Deploying the Capsule Operator

Use the Capsule Operator for easily implementing, managing, and maintaining mutitenancy and access control in Kubernetes.
Use the Capsule Operator for easily implementing, managing, and maintaining multitenancy and access control in Kubernetes.

## Requirements

@@ -24,19 +24,23 @@ The Capsule Operator Chart can be used to instantly deploy the Capsule Operator

$ helm repo add clastix https://clastix.github.io/charts

2. Install the Chart:
2. Create the Namespace:

$ kubectl create namespace capsule-system

3. Install the Chart:

$ helm install capsule clastix/capsule -n capsule-system

3. Show the status:
4. Show the status:

$ helm status capsule -n capsule-system

4. Upgrade the Chart
5. Upgrade the Chart

$ helm upgrade capsule clastix/capsule -n capsule-system

5. Uninstall the Chart
6. Uninstall the Chart

$ helm uninstall capsule -n capsule-system

@@ -63,10 +67,8 @@ Parameter | Description | Default
`manager.hostNetwork` | Specifies if the container should be started in `hostNetwork` mode. | `false`
`manager.options.logLevel` | Set the log verbosity of the controller with a value from 1 to 10.| `4`
`manager.options.forceTenantPrefix` | Boolean, enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash | `false`
`manager.options.capsuleUserGroup` | Override the Capsule user group | `capsule.clastix.io`
`manager.options.capsuleUserGroups` | Override the Capsule user groups | `[capsule.clastix.io]`
`manager.options.protectedNamespaceRegex` | If specified, disallows creation of namespaces matching the passed regexp | `null`
`manager.options.allowIngressHostnameCollision` | Allow the Ingress hostname collision at Ingress resource level across all the Tenants | `true`
`manager.options.allowTenantIngressHostnamesCollision` | Skip the validation check at Tenant level for colliding Ingress hostnames | `false`
`manager.image.repository` | Set the image repository of the controller. | `quay.io/clastix/capsule`
`manager.image.tag` | Overrides the image tag whose default is the chart. `appVersion` | `null`
`manager.image.pullPolicy` | Set the image pull policy. | `IfNotPresent`
@@ -89,19 +91,24 @@ Parameter | Description | Default
`replicaCount` | Set the replica count for Capsule pod. | `1`
`affinity` | Set affinity rules for the Capsule pod. | `{}`
`podSecurityPolicy.enabled` | Specify if a Pod Security Policy must be created. | `false`
`serviceMonitor.enabled` | Specify if a Service Monitor must be created. | `false`
`serviceMonitor.serviceAccount.name` | Specify Service Account name for metrics scrape. | `capsule`
`serviceMonitor.serviceAccount.namespace` | Specify Service Account namespace for metrics scrape. | `capsule-system`
`serviceMonitor.enabled` | Specifies if a service monitor must be created. | `false`
`serviceMonitor.labels` | Additional labels which will be added to service monitor. | `{}`
`serviceMonitor.annotations` | Additional annotations which will be added to service monitor. | `{}`
`serviceMonitor.matchLabels` | Additional matchLabels which will be added to service monitor. | `{}`
`serviceMonitor.serviceAccount.name` | Specifies service account name for metrics scrape. | `capsule`
`serviceMonitor.serviceAccount.namespace` | Specifies service account namespace for metrics scrape. | `capsule-system`
`customLabels` | Additional labels which will be added to all resources created by Capsule helm chart . | `{}`
`customAnnotations` | Additional annotations which will be added to all resources created by Capsule helm chart . | `{}`
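
The new `serviceMonitor.*`, `customLabels`, and `customAnnotations` parameters from the table above can be combined in a values file. A minimal sketch; the file name and all label/annotation values are illustrative, not taken from the chart:

```yaml
# my-values.yaml -- hypothetical override file
serviceMonitor:
  enabled: true
  labels:
    release: prometheus          # placeholder label matching a Prometheus serviceMonitorSelector
  annotations: {}
  matchLabels: {}

# Added to every resource created by the chart
customLabels:
  team: platform                 # placeholder label
customAnnotations:
  contact: platform@example.com  # placeholder annotation
```

Such a file can be passed at install or upgrade time, for example with `helm upgrade --install capsule clastix/capsule -n capsule-system -f my-values.yaml`.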

## Created resources

This Helm Chart cretes the following Kubernetes resources in the release namespace:
This Helm Chart creates the following Kubernetes resources in the release namespace:

* Capsule Namespace
* Capsule Operator Deployment
* Capsule Service
* CA Secret
* Certfificate Secret
* Certificate Secret
* Tenant Custom Resource Definition
* MutatingWebHookConfiguration
* ValidatingWebHookConfiguration

@@ -30,14 +30,8 @@ spec:
spec:
description: CapsuleConfigurationSpec defines the Capsule configuration
properties:
allowIngressHostnameCollision:
default: true
description: Allow the collision of Ingress resource hostnames across all the Tenants.
type: boolean
allowTenantIngressHostnamesCollision:
description: "When defining the exact match for allowed Ingress hostnames at Tenant level, a collision is not allowed. Toggling this, Capsule will not check if a hostname collision is in place, allowing the creation of two or more Tenant resources although sharing the same allowed hostname(s). \n The JSON path of the resource is: /spec/ingressHostnames/allowed"
type: boolean
forceTenantPrefix:
default: false
description: Enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash. This is useful to avoid Namespace name collision in a public CaaS environment.
type: boolean
protectedNamespaceRegex:
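
Putting the fields above together, a CapsuleConfiguration object could look like the following sketch; the regex pattern and the boolean value are illustrative only, while the group matches the chart's default user group:

```yaml
apiVersion: capsule.clastix.io/v1alpha1
kind: CapsuleConfiguration
metadata:
  name: default
spec:
  forceTenantPrefix: true                           # illustrative value
  protectedNamespaceRegex: "^(kube-|capsule-).*$"   # hypothetical pattern
  userGroups:
    - capsule.clastix.io
```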
@@ -222,11 +222,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -408,11 +412,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -453,9 +461,9 @@ spec:
|
||||
type: object
|
||||
type: object
|
||||
policyTypes:
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are "Ingress", "Egress", or "Ingress,Egress". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
items:
|
||||
description: Policy Type string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
description: PolicyType string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
@@ -572,7 +580,7 @@ spec:
|
||||
name: State
|
||||
type: string
|
||||
- description: The max amount of Namespaces can be created
|
||||
jsonPath: .spec.namespaceQuota
|
||||
jsonPath: .spec.namespaceOptions.quota
|
||||
name: Namespace quota
|
||||
type: integer
|
||||
- description: The total amount of Namespaces in use
|
||||
@@ -646,17 +654,6 @@ spec:
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
externalServiceIPs:
|
||||
description: Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means all the IPs are allowed. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- allowed
|
||||
type: object
|
||||
imagePullPolicies:
|
||||
description: Specify the allowed values for the imagePullPolicies option in Pod resources. Capsule assures that all Pod resources created in the Tenant can use only one of the allowed policy. Optional.
|
||||
items:
|
||||
@@ -666,24 +663,37 @@ spec:
|
||||
- IfNotPresent
|
||||
type: string
|
||||
type: array
|
||||
ingressClasses:
|
||||
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
|
||||
ingressOptions:
|
||||
description: Specifies options for the Ingress resources, such as allowed hostnames and IngressClass. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
ingressHostnames:
|
||||
description: Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
allowedClasses:
|
||||
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
allowedHostnames:
|
||||
description: Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
hostnameCollisionScope:
|
||||
default: Disabled
|
||||
description: "Defines the scope of hostname collision check performed when Tenant Owners create Ingress with allowed hostnames. \n - Cluster: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces managed by Capsule. \n - Tenant: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces of the Tenant. \n - Namespace: disallow the creation of an Ingress if the pair hostname and path is already used in the Ingress Namespace. \n Optional."
|
||||
enum:
|
||||
- Cluster
|
||||
- Tenant
|
||||
- Namespace
|
||||
- Disabled
|
||||
type: string
|
||||
type: object
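
As a quick illustration of the new `ingressOptions` layout (replacing the former top-level `ingressClasses` and `ingressHostnames`), a Tenant spec fragment could look like this sketch; class names, hostnames, and the regex are placeholders:

```yaml
# Fragment of a Tenant spec (apiVersion, kind, and metadata omitted)
spec:
  ingressOptions:
    allowedClasses:
      allowed:
        - nginx                     # placeholder IngressClass name
      allowedRegex: "^tenant-.*$"   # placeholder regex
    allowedHostnames:
      allowed:
        - app.example.com           # placeholder hostname
    hostnameCollisionScope: Tenant  # one of Cluster, Tenant, Namespace, Disabled
```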
|
||||
limitRanges:
|
||||
@@ -755,22 +765,26 @@ spec:
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
namespaceQuota:
|
||||
description: Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
|
||||
format: int32
|
||||
minimum: 1
|
||||
type: integer
|
||||
namespacesMetadata:
|
||||
description: Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
|
||||
namespaceOptions:
|
||||
description: Specifies options for the Namespaces, such as additional metadata or maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
|
||||
properties:
|
||||
additionalAnnotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
additionalLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
additionalMetadata:
|
||||
description: Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
|
||||
properties:
|
||||
annotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
labels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
quota:
|
||||
description: Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
|
||||
format: int32
|
||||
minimum: 1
|
||||
type: integer
|
||||
type: object
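
Likewise, the former `namespaceQuota` and `namespacesMetadata` fields now live under `namespaceOptions`; a minimal sketch, with placeholder label and annotation values:

```yaml
# Fragment of a Tenant spec
spec:
  namespaceOptions:
    quota: 3                        # at most three Namespaces for this Tenant
    additionalMetadata:
      labels:
        tenant-tier: standard       # placeholder label
      annotations:
        owner: alice@example.com    # placeholder annotation
```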
|
||||
networkPolicies:
|
||||
description: Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
|
||||
@@ -789,11 +803,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -975,11 +993,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -1020,9 +1042,9 @@ spec:
|
||||
type: object
|
||||
type: object
|
||||
policyTypes:
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are "Ingress", "Egress", or "Ingress,Egress". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
items:
|
||||
description: Policy Type string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
description: PolicyType string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
@@ -1079,7 +1101,7 @@ spec:
|
||||
type: object
|
||||
type: array
|
||||
priorityClasses:
|
||||
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
|
||||
description: Specifies the allowed priorityClasses assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed PriorityClasses. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
@@ -1137,6 +1159,13 @@ spec:
|
||||
type: array
|
||||
type: object
|
||||
type: array
|
||||
scope:
|
||||
default: Tenant
|
||||
description: Define if the Resource Budget should compute resource across all Namespaces in the Tenant or individually per cluster. Default is Tenant
|
||||
enum:
|
||||
- Tenant
|
||||
- Namespace
|
||||
type: string
|
||||
type: object
|
||||
serviceOptions:
|
||||
description: Specifies options for the Service, such as additional metadata or block of certain type of Services. Optional.
|
||||
@@ -1144,11 +1173,11 @@ spec:
|
||||
additionalMetadata:
|
||||
description: Specifies additional labels and annotations the Capsule operator places on any Service resource in the Tenant. Optional.
|
||||
properties:
|
||||
additionalAnnotations:
|
||||
annotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
additionalLabels:
|
||||
labels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
@@ -1160,11 +1189,26 @@ spec:
|
||||
default: true
|
||||
description: Specifies if ExternalName service type resources are allowed for the Tenant. Default is true. Optional.
|
||||
type: boolean
|
||||
loadBalancer:
|
||||
default: true
|
||||
description: Specifies if LoadBalancer service type resources are allowed for the Tenant. Default is true. Optional.
|
||||
type: boolean
|
||||
nodePort:
|
||||
default: true
|
||||
description: Specifies if NodePort service type resources are allowed for the Tenant. Default is true. Optional.
|
||||
type: boolean
|
||||
type: object
|
||||
externalIPs:
|
||||
description: Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means no IPs are allowed. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- allowed
|
||||
type: object
|
||||
type: object
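
The `serviceOptions` block gathers Service-related settings, including the `externalIPs` allow-list that replaces the old top-level `externalServiceIPs`; a sketch with placeholder values:

```yaml
# Fragment of a Tenant spec
spec:
  serviceOptions:
    additionalMetadata:
      labels:
        billing: tenant-a           # placeholder label
    externalIPs:
      allowed:
        - 192.168.100.10/32         # placeholder address; CIDR notation is accepted by the pattern
```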
|
||||
storageClasses:
|
||||
description: Specifies the allowed StorageClasses assigned to the Tenant. Capsule assures that all PersistentVolumeClaim resources created in the Tenant can use only one of the allowed StorageClasses. Optional.
|
||||
@@ -1191,11 +1235,11 @@ spec:
|
||||
description: How many namespaces are assigned to the Tenant.
|
||||
type: integer
|
||||
state:
|
||||
default: active
|
||||
description: The operational state of the Tenant. Possible values are "active", "cordoned".
|
||||
default: Active
|
||||
description: The operational state of the Tenant. Possible values are "Active", "Cordoned".
|
||||
enum:
|
||||
- cordoned
|
||||
- active
|
||||
- Cordoned
|
||||
- Active
|
||||
type: string
|
||||
required:
|
||||
- size
|
||||
|
||||
@@ -40,6 +40,9 @@ helm.sh/chart: {{ include "capsule.chart" . }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- if .Values.customLabels }}
|
||||
{{ toYaml .Values.customLabels }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
@@ -50,6 +53,19 @@ app.kubernetes.io/name: {{ include "capsule.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
ServiceAccount annotations
|
||||
*/}}
|
||||
{{- define "capsule.serviceAccountAnnotations" -}}
|
||||
{{- if .Values.serviceAccount.annotations }}
|
||||
{{- toYaml .Values.serviceAccount.annotations }}
|
||||
{{- end }}
|
||||
{{- if .Values.customAnnotations }}
|
||||
{{ toYaml .Values.customAnnotations }}
|
||||
{{- end }}
|
||||
{{- end }}
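
The `capsule.serviceAccountAnnotations` helper above merges `serviceAccount.annotations` with the global `customAnnotations`, so a values file such as the following sketch (both annotation values are hypothetical) renders a ServiceAccount carrying both sets of annotations:

```yaml
serviceAccount:
  create: true
  annotations:
    eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/capsule  # hypothetical annotation
customAnnotations:
  contact: platform@example.com                                         # hypothetical annotation
```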
|
||||
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
|
||||
@@ -3,5 +3,9 @@ kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "capsule.secretCaName" . }}
|
||||
data:
|
||||
|
||||
@@ -3,5 +3,9 @@ kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "capsule.secretTlsName" . }}
|
||||
data:
|
||||
|
||||
@@ -2,6 +2,12 @@ apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: CapsuleConfiguration
|
||||
metadata:
|
||||
name: default
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
forceTenantPrefix: {{ .Values.manager.options.forceTenantPrefix }}
|
||||
userGroups:
|
||||
@@ -9,5 +15,3 @@ spec:
|
||||
- {{ . }}
|
||||
{{- end}}
|
||||
protectedNamespaceRegex: {{ .Values.manager.options.protectedNamespaceRegex | quote }}
|
||||
allowTenantIngressHostnamesCollision: {{ .Values.manager.options.allowTenantIngressHostnamesCollision }}
|
||||
allowIngressHostnameCollision: {{ .Values.manager.options.allowIngressHostnameCollision }}
|
||||
|
||||
@@ -4,6 +4,10 @@ metadata:
|
||||
name: {{ include "capsule.deploymentName" . }}
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
@@ -11,12 +15,12 @@ spec:
|
||||
{{- include "capsule.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
{{- with .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "capsule.selectorLabels" . | nindent 8 }}
|
||||
{{- include "capsule.labels" . | nindent 8 }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
|
||||
@@ -4,9 +4,13 @@ kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- if .Values.serviceMonitor.labels }}
|
||||
{{- if .Values.serviceMonitor.labels }}
|
||||
{{- toYaml .Values.serviceMonitor.labels | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "capsule.fullname" . }}-metrics-role
|
||||
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
|
||||
rules:
|
||||
|
||||
@@ -4,6 +4,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}-controller-manager-metrics-service
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
ports:
|
||||
- port: 8080
|
||||
|
||||
@@ -4,6 +4,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}-mutating-webhook-configuration
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
@@ -15,7 +19,7 @@ webhooks:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /namespace-owner-reference
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
failurePolicy: {{ .Values.webhooks.namespaceOwnerReference.failurePolicy }}
|
||||
matchPolicy: Equivalent
|
||||
name: owner.namespace.capsule.clastix.io
|
||||
namespaceSelector: {}
|
||||
@@ -28,6 +32,7 @@ webhooks:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- namespaces
|
||||
scope: '*'
|
||||
|
||||
@@ -5,6 +5,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
|
||||
@@ -6,16 +6,16 @@ kind: Job
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}-waiting-certs"
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name | quote }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion }}
|
||||
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
# This is what defines this resource as a hook. Without this line, the
|
||||
# job is considered part of the release.
|
||||
"helm.sh/hook": post-install
|
||||
"helm.sh/hook-weight": "-5"
|
||||
"helm.sh/hook-delete-policy": hook-succeeded
|
||||
{{- with .Values.customAnnotations }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
|
||||
@@ -7,16 +7,16 @@ kind: Job
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}-rbac-cleaner"
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name | quote }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion }}
|
||||
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
# This is what defines this resource as a hook. Without this line, the
|
||||
# job is considered part of the release.
|
||||
"helm.sh/hook": pre-delete
|
||||
"helm.sh/hook-weight": "-5"
|
||||
"helm.sh/hook-delete-policy": hook-succeeded
|
||||
{{- with .Values.customAnnotations }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
|
||||
@@ -4,6 +4,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}-proxy-role
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- authentication.k8s.io
|
||||
@@ -24,6 +28,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}-metrics-reader
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
rules:
|
||||
- nonResourceURLs:
|
||||
- /metrics
|
||||
@@ -36,6 +44,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}-proxy-rolebinding
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
@@ -51,6 +63,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}-manager-rolebinding
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
|
||||
@@ -5,8 +5,8 @@ metadata:
|
||||
name: {{ include "capsule.serviceAccountName" . }}
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
{{- if or (.Values.serviceAccount.annotations) (.Values.customAnnotations) }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- include "capsule.serviceAccountAnnotations" . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -6,9 +6,13 @@ metadata:
|
||||
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- if .Values.serviceMonitor.labels }}
|
||||
{{- toYaml .Values.serviceMonitor.labels | nindent 4 }}
|
||||
{{- with .Values.serviceMonitor.labels }}
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- with .Values.serviceMonitor.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
endpoints:
|
||||
- interval: 15s
|
||||
@@ -16,7 +20,11 @@ spec:
|
||||
path: /metrics
|
||||
jobLabel: app.kubernetes.io/name
|
||||
selector:
|
||||
matchLabels: {{ include "capsule.labels" . | nindent 6 }}
|
||||
matchLabels:
|
||||
{{- include "capsule.labels" . | nindent 6 }}
|
||||
{{- with .Values.serviceMonitor.matchLabels }}
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
|
||||
@@ -4,6 +4,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}-validating-webhook-configuration
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1
|
||||
@@ -15,13 +19,11 @@ webhooks:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /cordoning
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
failurePolicy: {{ .Values.webhooks.cordoning.failurePolicy }}
|
||||
matchPolicy: Equivalent
|
||||
name: cordoning.tenant.capsule.clastix.io
|
||||
namespaceSelector:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
{{- toYaml .Values.webhooks.cordoning.namespaceSelector | nindent 4}}
|
||||
objectSelector: {}
|
||||
rules:
|
||||
- apiGroups:
|
||||
@@ -47,10 +49,11 @@ webhooks:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /ingresses
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
failurePolicy: {{ .Values.webhooks.ingresses.failurePolicy }}
|
||||
matchPolicy: Equivalent
|
||||
name: ingress.capsule.clastix.io
|
||||
namespaceSelector:
|
||||
{{- toYaml .Values.webhooks.ingresses.namespaceSelector | nindent 4}}
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
@@ -80,7 +83,7 @@ webhooks:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /namespaces
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
failurePolicy: {{ .Values.webhooks.namespaces.failurePolicy }}
|
||||
matchPolicy: Equivalent
|
||||
name: namespaces.capsule.clastix.io
|
||||
namespaceSelector: {}
|
||||
@@ -109,13 +112,11 @@ webhooks:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /networkpolicies
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
failurePolicy: {{ .Values.webhooks.networkpolicies.failurePolicy }}
|
||||
matchPolicy: Equivalent
|
||||
name: networkpolicies.capsule.clastix.io
|
||||
namespaceSelector:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
{{- toYaml .Values.webhooks.networkpolicies.namespaceSelector | nindent 4}}
|
||||
objectSelector: {}
|
||||
rules:
|
||||
- apiGroups:
|
||||
@@ -140,13 +141,11 @@ webhooks:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /pods
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
failurePolicy: {{ .Values.webhooks.pods.failurePolicy }}
|
||||
matchPolicy: Exact
|
||||
name: pods.capsule.clastix.io
|
||||
namespaceSelector:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
{{- toYaml .Values.webhooks.pods.namespaceSelector | nindent 4}}
|
||||
objectSelector: {}
|
||||
rules:
|
||||
- apiGroups:
|
||||
@@ -169,12 +168,10 @@ webhooks:
|
||||
name: {{ include "capsule.fullname" . }}-webhook-service
|
||||
namespace: capsule-system
|
||||
path: /persistentvolumeclaims
|
||||
failurePolicy: Fail
|
||||
failurePolicy: {{ .Values.webhooks.persistentvolumeclaims.failurePolicy }}
|
||||
name: pvc.capsule.clastix.io
|
||||
namespaceSelector:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
{{- toYaml .Values.webhooks.persistentvolumeclaims.namespaceSelector | nindent 4}}
|
||||
objectSelector: {}
|
||||
rules:
|
||||
- apiGroups:
|
||||
@@ -198,13 +195,11 @@ webhooks:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /services
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
failurePolicy: {{ .Values.webhooks.services.failurePolicy }}
|
||||
matchPolicy: Exact
|
||||
name: services.capsule.clastix.io
|
||||
namespaceSelector:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
{{- toYaml .Values.webhooks.services.namespaceSelector | nindent 4}}
|
||||
objectSelector: {}
|
||||
rules:
|
||||
- apiGroups:
|
||||
@@ -229,7 +224,7 @@ webhooks:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /tenants
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
failurePolicy: {{ .Values.webhooks.tenants.failurePolicy }}
|
||||
matchPolicy: Exact
|
||||
name: tenants.capsule.clastix.io
|
||||
namespaceSelector: {}
|
||||
|
||||
@@ -4,6 +4,10 @@ metadata:
|
||||
name: {{ include "capsule.fullname" . }}-webhook-service
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.customAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
ports:
|
||||
- port: 443
|
||||
|
||||
@@ -21,8 +21,6 @@ manager:
forceTenantPrefix: false
capsuleUserGroups: ["capsule.clastix.io"]
protectedNamespaceRegex: ""
allowIngressHostnameCollision: true
allowTenantIngressHostnamesCollision: false
livenessProbe:
httpGet:
path: /healthz
@@ -44,8 +42,6 @@ jobs:
repository: quay.io/clastix/kubectl
pullPolicy: IfNotPresent
tag: "v1.20.7"
mutatingWebhooksTimeoutSeconds: 30
validatingWebhooksTimeoutSeconds: 30
imagePullSecrets: []
serviceAccount:
create: true
@@ -68,7 +64,7 @@ podSecurityPolicy:
serviceMonitor:
enabled: false
# Install the ServiceMonitor into a different Namespace, as the monitoring stack one (default: the release one)
namespace:
namespace: ''
# Assign additional labels according to Prometheus' serviceMonitorSelector matching labels
labels: {}
annotations: {}
@@ -76,3 +72,56 @@ serviceMonitor:
serviceAccount:
name: capsule
namespace: capsule-system

# Additional labels
customLabels: {}

# Additional annotations
customAnnotations: {}

# Webhooks configurations
webhooks:
namespaceOwnerReference:
failurePolicy: Fail
cordoning:
failurePolicy: Fail
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
ingresses:
failurePolicy: Fail
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
namespaces:
failurePolicy: Fail
networkpolicies:
failurePolicy: Fail
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
pods:
failurePolicy: Fail
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
persistentvolumeclaims:
failurePolicy: Fail
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
tenants:
failurePolicy: Fail
services:
failurePolicy: Fail
namespaceSelector:
matchExpressions:
- key: capsule.clastix.io/tenant
operator: Exists
mutatingWebhooksTimeoutSeconds: 30
validatingWebhooksTimeoutSeconds: 30
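
Each webhook's `failurePolicy` (and, where present, its `namespaceSelector`) can now be overridden from a values file. A sketch, assuming `Ignore` as the alternative to `Fail` since these are the standard Kubernetes admission webhook failure policies; the file name is hypothetical:

```yaml
# my-values.yaml -- hypothetical override
webhooks:
  pods:
    failurePolicy: Ignore    # let Pod requests through if the webhook is unreachable
  services:
    failurePolicy: Ignore
```

Apply it with, for example, `helm upgrade capsule clastix/capsule -n capsule-system -f my-values.yaml`.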

@@ -30,15 +30,8 @@ spec:
metadata:
type: object
spec:
description: CapsuleConfigurationSpec defines the Capsule configuration nolint:maligned
description: CapsuleConfigurationSpec defines the Capsule configuration
properties:
allowIngressHostnameCollision:
default: true
description: Allow the collision of Ingress resource hostnames across all the Tenants.
type: boolean
allowTenantIngressHostnamesCollision:
description: "When defining the exact match for allowed Ingress hostnames at Tenant level, a collision is not allowed. Toggling this, Capsule will not check if a hostname collision is in place, allowing the creation of two or more Tenant resources although sharing the same allowed hostname(s). \n The JSON path of the resource is: /spec/ingressHostnames/allowed"
type: boolean
forceTenantPrefix:
default: false
description: Enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash. This is useful to avoid Namespace name collision in a public CaaS environment.

@@ -222,11 +222,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -408,11 +412,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -453,9 +461,9 @@ spec:
|
||||
type: object
|
||||
type: object
|
||||
policyTypes:
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are "Ingress", "Egress", or "Ingress,Egress". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
items:
|
||||
description: Policy Type string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
description: PolicyType string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
@@ -572,7 +580,7 @@ spec:
|
||||
name: State
|
||||
type: string
|
||||
- description: The max amount of Namespaces can be created
|
||||
jsonPath: .spec.namespaceQuota
|
||||
jsonPath: .spec.namespaceOptions.quota
|
||||
name: Namespace quota
|
||||
type: integer
|
||||
- description: The total amount of Namespaces in use
|
||||
@@ -646,17 +654,6 @@ spec:
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
externalServiceIPs:
|
||||
description: Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means all the IPs are allowed. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- allowed
|
||||
type: object
|
||||
imagePullPolicies:
|
||||
description: Specify the allowed values for the imagePullPolicies option in Pod resources. Capsule assures that all Pod resources created in the Tenant can use only one of the allowed policy. Optional.
|
||||
items:
|
||||
@@ -666,24 +663,37 @@ spec:
|
||||
- IfNotPresent
|
||||
type: string
|
||||
type: array
|
||||
ingressClasses:
|
||||
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
|
||||
ingressOptions:
|
||||
description: Specifies options for the Ingress resources, such as allowed hostnames and IngressClass. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
ingressHostnames:
|
||||
description: Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
allowedClasses:
|
||||
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
allowedHostnames:
|
||||
description: Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
hostnameCollisionScope:
|
||||
default: Disabled
|
||||
description: "Defines the scope of hostname collision check performed when Tenant Owners create Ingress with allowed hostnames. \n - Cluster: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces managed by Capsule. \n - Tenant: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces of the Tenant. \n - Namespace: disallow the creation of an Ingress if the pair hostname and path is already used in the Ingress Namespace. \n Optional."
|
||||
enum:
|
||||
- Cluster
|
||||
- Tenant
|
||||
- Namespace
|
||||
- Disabled
|
||||
type: string
|
||||
type: object
|
||||
limitRanges:
|
||||
@@ -755,22 +765,26 @@ spec:
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
namespaceQuota:
|
||||
description: Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
|
||||
format: int32
|
||||
minimum: 1
|
||||
type: integer
|
||||
namespacesMetadata:
|
||||
description: Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
|
||||
namespaceOptions:
|
||||
description: Specifies options for the Namespaces, such as additional metadata or maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
|
||||
properties:
|
||||
additionalAnnotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
additionalLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
additionalMetadata:
|
||||
description: Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
|
||||
properties:
|
||||
annotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
labels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
quota:
|
||||
description: Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
|
||||
format: int32
|
||||
minimum: 1
|
||||
type: integer
|
||||
type: object
|
||||
networkPolicies:
|
||||
description: Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
|
||||
@@ -789,11 +803,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -975,11 +993,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -1020,9 +1042,9 @@ spec:
|
||||
type: object
|
||||
type: object
|
||||
policyTypes:
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are "Ingress", "Egress", or "Ingress,Egress". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
items:
|
||||
description: Policy Type string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
description: PolicyType string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
@@ -1079,7 +1101,7 @@ spec:
|
||||
type: object
|
||||
type: array
|
||||
priorityClasses:
|
||||
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
|
||||
description: Specifies the allowed priorityClasses assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed PriorityClasses. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
@@ -1137,6 +1159,13 @@ spec:
|
||||
type: array
|
||||
type: object
|
||||
type: array
|
||||
scope:
|
||||
default: Tenant
|
||||
description: Define if the Resource Budget should compute resource across all Namespaces in the Tenant or individually per cluster. Default is Tenant
|
||||
enum:
|
||||
- Tenant
|
||||
- Namespace
|
||||
type: string
|
||||
type: object
|
||||
serviceOptions:
|
||||
description: Specifies options for the Service, such as additional metadata or block of certain type of Services. Optional.
|
||||
@@ -1144,11 +1173,11 @@ spec:
|
||||
additionalMetadata:
|
||||
description: Specifies additional labels and annotations the Capsule operator places on any Service resource in the Tenant. Optional.
|
||||
properties:
|
||||
additionalAnnotations:
|
||||
annotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
additionalLabels:
|
||||
labels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
@@ -1160,11 +1189,26 @@ spec:
|
||||
default: true
|
||||
description: Specifies if ExternalName service type resources are allowed for the Tenant. Default is true. Optional.
|
||||
type: boolean
|
||||
loadBalancer:
|
||||
default: true
|
||||
description: Specifies if LoadBalancer service type resources are allowed for the Tenant. Default is true. Optional.
|
||||
type: boolean
|
||||
nodePort:
|
||||
default: true
|
||||
description: Specifies if NodePort service type resources are allowed for the Tenant. Default is true. Optional.
|
||||
type: boolean
|
||||
type: object
|
||||
externalIPs:
|
||||
description: Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means no IPs are allowed. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- allowed
|
||||
type: object
|
||||
type: object
|
||||
storageClasses:
|
||||
description: Specifies the allowed StorageClasses assigned to the Tenant. Capsule assures that all PersistentVolumeClaim resources created in the Tenant can use only one of the allowed StorageClasses. Optional.
|
||||
@@ -1191,11 +1235,11 @@ spec:
|
||||
description: How many namespaces are assigned to the Tenant.
|
||||
type: integer
|
||||
state:
|
||||
default: active
|
||||
description: The operational state of the Tenant. Possible values are "active", "cordoned".
|
||||
default: Active
|
||||
description: The operational state of the Tenant. Possible values are "Active", "Cordoned".
|
||||
enum:
|
||||
- cordoned
|
||||
- active
|
||||
- Cordoned
|
||||
- Active
|
||||
type: string
|
||||
required:
|
||||
- size
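For orientation, the hunk above capitalizes the Tenant state enum. A minimal sketch of how a reconciled Tenant status reads after this change (namespace names are illustrative; state, size and namespaces are the fields used by the status schema and by the controller code later in this diff):

status:
  state: Active        # previously "active"; the other admitted value is now "Cordoned"
  size: 3              # how many Namespaces are assigned to the Tenant
  namespaces:          # illustrative names, not taken from this changeset
    - gas-dev
    - gas-staging
    - gas-production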
@@ -35,15 +35,8 @@ spec:
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: CapsuleConfigurationSpec defines the Capsule configuration nolint:maligned
|
||||
description: CapsuleConfigurationSpec defines the Capsule configuration
|
||||
properties:
|
||||
allowIngressHostnameCollision:
|
||||
default: true
|
||||
description: Allow the collision of Ingress resource hostnames across all the Tenants.
|
||||
type: boolean
|
||||
allowTenantIngressHostnamesCollision:
|
||||
description: "When defining the exact match for allowed Ingress hostnames at Tenant level, a collision is not allowed. Toggling this, Capsule will not check if a hostname collision is in place, allowing the creation of two or more Tenant resources although sharing the same allowed hostname(s). \n The JSON path of the resource is: /spec/ingressHostnames/allowed"
|
||||
type: boolean
|
||||
forceTenantPrefix:
|
||||
default: false
|
||||
description: Enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash. This is useful to avoid Namespace name collision in a public CaaS environment.
|
||||
@@ -301,11 +294,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -487,11 +484,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -532,9 +533,9 @@ spec:
|
||||
type: object
|
||||
type: object
|
||||
policyTypes:
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are "Ingress", "Egress", or "Ingress,Egress". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
items:
|
||||
description: Policy Type string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
description: PolicyType string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
@@ -651,7 +652,7 @@ spec:
|
||||
name: State
|
||||
type: string
|
||||
- description: The max amount of Namespaces can be created
|
||||
jsonPath: .spec.namespaceQuota
|
||||
jsonPath: .spec.namespaceOptions.quota
|
||||
name: Namespace quota
|
||||
type: integer
|
||||
- description: The total amount of Namespaces in use
|
||||
@@ -725,17 +726,6 @@ spec:
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
externalServiceIPs:
|
||||
description: Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means all the IPs are allowed. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- allowed
|
||||
type: object
|
||||
imagePullPolicies:
|
||||
description: Specify the allowed values for the imagePullPolicies option in Pod resources. Capsule assures that all Pod resources created in the Tenant can use only one of the allowed policy. Optional.
|
||||
items:
|
||||
@@ -745,24 +735,37 @@ spec:
|
||||
- IfNotPresent
|
||||
type: string
|
||||
type: array
|
||||
ingressClasses:
|
||||
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
|
||||
ingressOptions:
|
||||
description: Specifies options for the Ingress resources, such as allowed hostnames and IngressClass. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
ingressHostnames:
|
||||
description: Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
allowedClasses:
|
||||
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
allowedHostnames:
|
||||
description: Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
hostnameCollisionScope:
|
||||
default: Disabled
|
||||
description: "Defines the scope of hostname collision check performed when Tenant Owners create Ingress with allowed hostnames. \n - Cluster: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces managed by Capsule. \n - Tenant: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces of the Tenant. \n - Namespace: disallow the creation of an Ingress if the pair hostname and path is already used in the Ingress Namespace. \n Optional."
|
||||
enum:
|
||||
- Cluster
|
||||
- Tenant
|
||||
- Namespace
|
||||
- Disabled
|
||||
type: string
|
||||
type: object
|
||||
limitRanges:
|
||||
@@ -834,22 +837,26 @@ spec:
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
namespaceQuota:
|
||||
description: Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
|
||||
format: int32
|
||||
minimum: 1
|
||||
type: integer
|
||||
namespacesMetadata:
|
||||
description: Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
|
||||
namespaceOptions:
|
||||
description: Specifies options for the Namespaces, such as additional metadata or maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
|
||||
properties:
|
||||
additionalAnnotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
additionalLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
additionalMetadata:
|
||||
description: Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
|
||||
properties:
|
||||
annotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
labels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
quota:
|
||||
description: Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
|
||||
format: int32
|
||||
minimum: 1
|
||||
type: integer
|
||||
type: object
|
||||
networkPolicies:
|
||||
description: Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
|
||||
@@ -868,11 +875,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -1054,11 +1065,15 @@ spec:
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow traffic on
|
||||
properties:
|
||||
endPort:
|
||||
description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
|
||||
format: int32
|
||||
type: integer
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
|
||||
description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
default: TCP
|
||||
@@ -1099,9 +1114,9 @@ spec:
|
||||
type: object
|
||||
type: object
|
||||
policyTypes:
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are "Ingress", "Egress", or "Ingress,Egress". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
description: List of rule types that the NetworkPolicy relates to. Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
|
||||
items:
|
||||
description: Policy Type string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
description: PolicyType string describes the NetworkPolicy type This type is beta-level in 1.8
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
@@ -1158,7 +1173,7 @@ spec:
|
||||
type: object
|
||||
type: array
|
||||
priorityClasses:
|
||||
description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
|
||||
description: Specifies the allowed priorityClasses assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed PriorityClasses. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
@@ -1216,6 +1231,13 @@ spec:
|
||||
type: array
|
||||
type: object
|
||||
type: array
|
||||
scope:
|
||||
default: Tenant
|
||||
description: Define if the Resource Budget should compute resource across all Namespaces in the Tenant or individually per cluster. Default is Tenant
|
||||
enum:
|
||||
- Tenant
|
||||
- Namespace
|
||||
type: string
|
||||
type: object
|
||||
serviceOptions:
|
||||
description: Specifies options for the Service, such as additional metadata or block of certain type of Services. Optional.
|
||||
@@ -1223,11 +1245,11 @@ spec:
|
||||
additionalMetadata:
|
||||
description: Specifies additional labels and annotations the Capsule operator places on any Service resource in the Tenant. Optional.
|
||||
properties:
|
||||
additionalAnnotations:
|
||||
annotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
additionalLabels:
|
||||
labels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
@@ -1239,11 +1261,26 @@ spec:
|
||||
default: true
|
||||
description: Specifies if ExternalName service type resources are allowed for the Tenant. Default is true. Optional.
|
||||
type: boolean
|
||||
loadBalancer:
|
||||
default: true
|
||||
description: Specifies if LoadBalancer service type resources are allowed for the Tenant. Default is true. Optional.
|
||||
type: boolean
|
||||
nodePort:
|
||||
default: true
|
||||
description: Specifies if NodePort service type resources are allowed for the Tenant. Default is true. Optional.
|
||||
type: boolean
|
||||
type: object
|
||||
externalIPs:
|
||||
description: Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means no IPs are allowed. Optional.
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- allowed
|
||||
type: object
|
||||
type: object
|
||||
storageClasses:
|
||||
description: Specifies the allowed StorageClasses assigned to the Tenant. Capsule assures that all PersistentVolumeClaim resources created in the Tenant can use only one of the allowed StorageClasses. Optional.
|
||||
@@ -1270,11 +1307,11 @@ spec:
|
||||
description: How many namespaces are assigned to the Tenant.
|
||||
type: integer
|
||||
state:
|
||||
default: active
|
||||
description: The operational state of the Tenant. Possible values are "active", "cordoned".
|
||||
default: Active
|
||||
description: The operational state of the Tenant. Possible values are "Active", "Cordoned".
|
||||
enum:
|
||||
- cordoned
|
||||
- active
|
||||
- Cordoned
|
||||
- Active
|
||||
type: string
|
||||
required:
|
||||
- size
|
||||
@@ -1374,7 +1411,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: quay.io/clastix/capsule:v0.1.0-rc4
image: quay.io/clastix/capsule:v0.1.0
imagePullPolicy: IfNotPresent
name: manager
ports:
@@ -1408,8 +1445,6 @@ metadata:
name: capsule-default
namespace: capsule-system
spec:
allowIngressHostnameCollision: false
allowTenantIngressHostnamesCollision: false
forceTenantPrefix: false
protectedNamespaceRegex: ""
userGroups:
@@ -1437,6 +1472,7 @@ webhooks:
- v1
operations:
- CREATE
- UPDATE
resources:
- namespaces
sideEffects: None
@@ -6,5 +6,3 @@ spec:
userGroups: ["capsule.clastix.io"]
forceTenantPrefix: false
protectedNamespaceRegex: ""
allowTenantIngressHostnamesCollision: false
allowIngressHostnameCollision: false
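The shipped CapsuleConfiguration drops the two Ingress hostname collision toggles; collision handling now lives on each Tenant under ingressOptions.hostnameCollisionScope (see the Tenant sample below). A sketch of the resulting object, assuming the v1alpha1 API group referenced by the configuration controller in this changeset:

apiVersion: capsule.clastix.io/v1alpha1   # assumed from the capsulev1alpha1 references in the controller code
kind: CapsuleConfiguration
metadata:
  name: capsule-default
spec:
  userGroups: ["capsule.clastix.io"]
  forceTenantPrefix: false
  protectedNamespaceRegex: ""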
@@ -7,4 +7,4 @@ kind: Kustomization
images:
- name: controller
newName: quay.io/clastix/capsule
newTag: v0.1.0-rc4
newTag: v0.1.0
@@ -7,5 +7,3 @@ spec:
userGroups: ["capsule.clastix.io"]
forceTenantPrefix: false
protectedNamespaceRegex: ""
allowTenantIngressHostnamesCollision: false
allowIngressHostnameCollision: false
@@ -18,27 +18,29 @@ spec:
allowedRegex: ^\w+.gcr.io$
serviceOptions:
additionalMetadata:
additionalAnnotations:
annotations:
capsule.clastix.io/bgp: "true"
additionalLabels:
labels:
capsule.clastix.io/pool: gas
allowedServices:
nodePort: false
externalName: false
externalServiceIPs:
allowed:
- 10.20.0.0/16
- "10.96.42.42"
externalIPs:
allowed:
- 10.20.0.0/16
- "10.96.42.42"
imagePullPolicies:
- Always
ingressClasses:
allowed:
- default
allowedRegex: ^\w+-lb$
ingressHostnames:
allowed:
- gas.acmecorp.com
allowedRegex: ^.*acmecorp.com$
ingressOptions:
hostnameCollisionScope: Cluster
allowedClasses:
allowed:
- default
allowedRegex: ^\w+-lb$
allowedHostnames:
allowed:
- gas.acmecorp.com
allowedRegex: ^.*acmecorp.com$
limitRanges:
items:
-
@@ -71,12 +73,13 @@ spec:
min:
storage: 1Gi
type: PersistentVolumeClaim
namespaceQuota: 3
namespacesMetadata:
additionalAnnotations:
capsule.clastix.io/backup: "false"
additionalLabels:
capsule.clastix.io/tenant: gas
namespaceOptions:
quota: 3
additionalMetadata:
annotations:
capsule.clastix.io/backup: "false"
labels:
capsule.clastix.io/tenant: gas
networkPolicies:
items:
-
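Putting the renames from the sample hunks above together: externalServiceIPs becomes serviceOptions.externalIPs, additionalAnnotations/additionalLabels become annotations/labels, ingressClasses and ingressHostnames fold into ingressOptions (which also gains hostnameCollisionScope), and namespaceQuota plus namespacesMetadata fold into namespaceOptions. A hedged sketch of the same Tenant fragment in the new layout; indentation is reconstructed, the apiVersion is assumed from the v1beta1 Go types added later in this changeset, and only the fields touched by these hunks are shown:

apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: gas
spec:
  imagePullPolicies:
    - Always
  serviceOptions:
    additionalMetadata:
      annotations:                       # was additionalAnnotations
        capsule.clastix.io/bgp: "true"
      labels:                            # was additionalLabels
        capsule.clastix.io/pool: gas
    allowedServices:
      nodePort: false
      externalName: false
    externalIPs:                         # was externalServiceIPs
      allowed:
        - 10.20.0.0/16
        - "10.96.42.42"
  ingressOptions:                        # replaces ingressClasses and ingressHostnames
    hostnameCollisionScope: Cluster
    allowedClasses:
      allowed:
        - default
      allowedRegex: ^\w+-lb$
    allowedHostnames:
      allowed:
        - gas.acmecorp.com
      allowedRegex: ^.*acmecorp.com$
  namespaceOptions:                      # replaces namespaceQuota and namespacesMetadata
    quota: 3
    additionalMetadata:
      annotations:
        capsule.clastix.io/backup: "false"
      labels:
        capsule.clastix.io/tenant: gas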
@@ -22,6 +22,7 @@ webhooks:
- v1
operations:
- CREATE
- UPDATE
resources:
- namespaces
sideEffects: None
@@ -1,7 +1,7 @@
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package rbac
|
||||
package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -25,8 +25,8 @@ type Manager struct {
|
||||
}
|
||||
|
||||
// InjectClient injects the Client interface, required by the Runnable interface
|
||||
func (r *Manager) InjectClient(c client.Client) error {
|
||||
r.Client = c
|
||||
func (c *Manager) InjectClient(client client.Client) error {
|
||||
c.Client = client
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -52,22 +52,22 @@ func forOptionPerInstanceName(instanceName string) builder.ForOption {
|
||||
})
|
||||
}
|
||||
|
||||
func (r *Manager) SetupWithManager(mgr ctrl.Manager, configurationName string) error {
|
||||
func (c *Manager) SetupWithManager(mgr ctrl.Manager, configurationName string) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&capsulev1alpha1.CapsuleConfiguration{}, forOptionPerInstanceName(configurationName)).
|
||||
Complete(r)
|
||||
Complete(c)
|
||||
}
|
||||
|
||||
func (r *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, err error) {
|
||||
r.Log.Info("CapsuleConfiguration reconciliation started", "request.name", request.Name)
|
||||
func (c *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, err error) {
|
||||
c.Log.Info("CapsuleConfiguration reconciliation started", "request.name", request.Name)
|
||||
|
||||
cfg := configuration.NewCapsuleConfiguration(r.Client, request.Name)
|
||||
cfg := configuration.NewCapsuleConfiguration(c.Client, request.Name)
|
||||
// Validating the Capsule Configuration options
|
||||
if _, err = cfg.ProtectedNamespaceRegexp(); err != nil {
|
||||
panic(errors.Wrap(err, "Invalid configuration for protected Namespace regex"))
|
||||
}
|
||||
|
||||
r.Log.Info("CapsuleConfiguration reconciliation finished", "request.name", request.Name)
|
||||
c.Log.Info("CapsuleConfiguration reconciliation finished", "request.name", request.Name)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -35,7 +35,7 @@ var (
|
||||
{
|
||||
APIGroups: []string{""},
|
||||
Resources: []string{"namespaces"},
|
||||
Verbs: []string{"delete"},
|
||||
Verbs: []string{"delete", "patch"},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -157,7 +157,7 @@ func (r CAReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl
|
||||
privateKeySecretKey: key.Bytes(),
|
||||
}
|
||||
|
||||
group := errgroup.Group{}
|
||||
group := new(errgroup.Group)
|
||||
group.Go(func() error {
|
||||
return r.UpdateMutatingWebhookConfiguration(crt.Bytes())
|
||||
})
|
||||
|
||||
@@ -9,12 +9,13 @@ import (
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"syscall"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
@@ -112,8 +113,38 @@ func (r TLSReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctr
|
||||
}
|
||||
|
||||
if instance.Name == tlsSecretName && res == controllerutil.OperationResultUpdated {
|
||||
r.Log.Info("Capsule TLS certificates has been updated, we need to restart the Controller")
|
||||
_ = syscall.Kill(syscall.Getpid(), syscall.SIGINT)
|
||||
r.Log.Info("Capsule TLS certificates has been updated, Controller pods must be restarted to load new certificate")
|
||||
|
||||
hostname, _ := os.Hostname()
|
||||
leaderPod := &corev1.Pod{}
|
||||
if err = r.Client.Get(ctx, types.NamespacedName{Namespace: os.Getenv("NAMESPACE"), Name: hostname}, leaderPod); err != nil {
|
||||
r.Log.Error(err, "cannot retrieve the leader Pod, probably running in out of the cluster mode")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
podList := &corev1.PodList{}
|
||||
if err = r.Client.List(ctx, podList, client.MatchingLabels(leaderPod.ObjectMeta.Labels)); err != nil {
|
||||
r.Log.Error(err, "cannot retrieve list of Capsule pods requiring restart upon TLS update")
|
||||
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
|
||||
for _, p := range podList.Items {
|
||||
nonLeaderPod := p
|
||||
// Skipping this Pod, must be deleted at the end
|
||||
if nonLeaderPod.GetName() == leaderPod.GetName() {
|
||||
continue
|
||||
}
|
||||
|
||||
if err = r.Client.Delete(ctx, &nonLeaderPod); err != nil {
|
||||
r.Log.Error(err, "cannot delete the non-leader Pod due to TLS update")
|
||||
}
|
||||
}
|
||||
|
||||
if err = r.Client.Delete(ctx, leaderPod); err != nil {
|
||||
r.Log.Error(err, "cannot delete the leader Pod due to TLS update")
|
||||
}
|
||||
}
|
||||
|
||||
r.Log.Info("Reconciliation completed, processing back in " + rq.String())
|
||||
|
||||
@@ -53,8 +53,8 @@ func (r *abstractServiceLabelsReconciler) Reconcile(ctx context.Context, request
|
||||
}
|
||||
|
||||
_, err = controllerutil.CreateOrUpdate(ctx, r.client, r.obj, func() (err error) {
|
||||
r.obj.SetLabels(r.sync(r.obj.GetLabels(), tenant.Spec.ServiceOptions.AdditionalMetadata.AdditionalLabels))
|
||||
r.obj.SetAnnotations(r.sync(r.obj.GetAnnotations(), tenant.Spec.ServiceOptions.AdditionalMetadata.AdditionalAnnotations))
|
||||
r.obj.SetLabels(r.sync(r.obj.GetLabels(), tenant.Spec.ServiceOptions.AdditionalMetadata.Labels))
|
||||
r.obj.SetAnnotations(r.sync(r.obj.GetAnnotations(), tenant.Spec.ServiceOptions.AdditionalMetadata.Annotations))
|
||||
return nil
|
||||
})
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ package servicelabels
|
||||
|
||||
import (
|
||||
"github.com/go-logr/logr"
|
||||
discoveryv1 "k8s.io/api/discovery/v1"
|
||||
discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
)
|
||||
@@ -24,12 +25,16 @@ func (r *EndpointSlicesLabelsReconciler) SetupWithManager(mgr ctrl.Manager) erro
|
||||
log: r.Log,
|
||||
}
|
||||
|
||||
if r.VersionMajor == 1 && r.VersionMinor <= 16 {
|
||||
switch {
|
||||
case r.VersionMajor == 1 && r.VersionMinor <= 16:
|
||||
r.Log.Info("Skipping controller setup, as EndpointSlices are not supported on current kubernetes version", "VersionMajor", r.VersionMajor, "VersionMinor", r.VersionMinor)
|
||||
return nil
|
||||
case r.VersionMajor == 1 && r.VersionMinor >= 21:
|
||||
r.abstractServiceLabelsReconciler.obj = &discoveryv1.EndpointSlice{}
|
||||
default:
|
||||
r.abstractServiceLabelsReconciler.obj = &discoveryv1beta1.EndpointSlice{}
|
||||
}
|
||||
|
||||
r.abstractServiceLabelsReconciler.obj = &discoveryv1beta1.EndpointSlice{}
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(r.obj, r.abstractServiceLabelsReconciler.forOptionPerInstanceName()).
|
||||
Complete(r)
|
||||
|
||||
controllers/tenant/limitranges.go (new file, 80 lines)
@@ -0,0 +1,80 @@
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
|
||||
// Ensuring all the LimitRange are applied to each Namespace handled by the Tenant.
|
||||
func (r *Manager) syncLimitRanges(tenant *capsulev1beta1.Tenant) error {
|
||||
// getting requested LimitRange keys
|
||||
keys := make([]string, 0, len(tenant.Spec.LimitRanges.Items))
|
||||
|
||||
for i := range tenant.Spec.LimitRanges.Items {
|
||||
keys = append(keys, strconv.Itoa(i))
|
||||
}
|
||||
|
||||
group := new(errgroup.Group)
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
namespace := ns
|
||||
|
||||
group.Go(func() error {
|
||||
return r.syncLimitRange(tenant, namespace, keys)
|
||||
})
|
||||
}
|
||||
|
||||
return group.Wait()
|
||||
}
|
||||
|
||||
func (r *Manager) syncLimitRange(tenant *capsulev1beta1.Tenant, namespace string, keys []string) (err error) {
|
||||
// getting LimitRange labels for the mutateFn
|
||||
var tenantLabel, limitRangeLabel string
|
||||
|
||||
if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
|
||||
return
|
||||
}
|
||||
if limitRangeLabel, err = capsulev1beta1.GetTypeLabel(&corev1.LimitRange{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = r.pruningResources(namespace, keys, &corev1.LimitRange{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for i, spec := range tenant.Spec.LimitRanges.Items {
|
||||
target := &corev1.LimitRange{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d", tenant.Name, i),
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
|
||||
var res controllerutil.OperationResult
|
||||
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
|
||||
target.ObjectMeta.Labels = map[string]string{
|
||||
tenantLabel: tenant.Name,
|
||||
limitRangeLabel: strconv.Itoa(i),
|
||||
}
|
||||
target.Spec = spec
|
||||
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring LimitRange %s", target.GetName()), err)
|
||||
|
||||
r.Log.Info("LimitRange sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
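syncLimitRanges above fans out over the Tenant Namespaces and, through syncLimitRange, replicates every entry of spec.limitRanges.items as a LimitRange named capsule-<tenant>-<index>. A minimal sketch of the Tenant fragment it consumes, extrapolated from the sample diff earlier (the min/PersistentVolumeClaim values appear there; the max value is illustrative):

spec:
  limitRanges:
    items:
      - limits:                      # each item is a plain LimitRangeSpec
          - type: PersistentVolumeClaim
            min:
              storage: 1Gi
            max:
              storage: 10Gi          # illustrative, not taken from this diff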
controllers/tenant/manager.go (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/retry"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
|
||||
type Manager struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Scheme *runtime.Scheme
|
||||
Recorder record.EventRecorder
|
||||
}
|
||||
|
||||
func (r *Manager) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&capsulev1beta1.Tenant{}).
|
||||
Owns(&corev1.Namespace{}).
|
||||
Owns(&networkingv1.NetworkPolicy{}).
|
||||
Owns(&corev1.LimitRange{}).
|
||||
Owns(&corev1.ResourceQuota{}).
|
||||
Owns(&rbacv1.RoleBinding{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r Manager) Reconcile(ctx context.Context, request ctrl.Request) (result ctrl.Result, err error) {
|
||||
r.Log = r.Log.WithValues("Request.Name", request.Name)
|
||||
|
||||
// Fetch the Tenant instance
|
||||
instance := &capsulev1beta1.Tenant{}
|
||||
if err = r.Get(ctx, request.NamespacedName, instance); err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
r.Log.Info("Request object not found, could have been deleted after reconcile request")
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
r.Log.Error(err, "Error reading the object")
|
||||
return
|
||||
}
|
||||
// Ensuring the Tenant Status
|
||||
if err = r.updateTenantStatus(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot update Tenant status")
|
||||
return
|
||||
}
|
||||
|
||||
// Ensuring all namespaces are collected
|
||||
r.Log.Info("Ensuring all Namespaces are collected")
|
||||
if err = r.collectNamespaces(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot collect Namespace resources")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Starting processing of Namespaces", "items", len(instance.Status.Namespaces))
|
||||
if err = r.syncNamespaces(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespace items")
|
||||
return
|
||||
}
|
||||
|
||||
if instance.Spec.NetworkPolicies != nil {
|
||||
r.Log.Info("Starting processing of Network Policies", "items", len(instance.Spec.NetworkPolicies.Items))
|
||||
if err = r.syncNetworkPolicies(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync NetworkPolicy items")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if instance.Spec.LimitRanges != nil {
|
||||
r.Log.Info("Starting processing of Limit Ranges", "items", len(instance.Spec.LimitRanges.Items))
|
||||
if err = r.syncLimitRanges(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync LimitRange items")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if instance.Spec.ResourceQuota != nil {
|
||||
r.Log.Info("Starting processing of Resource Quotas", "items", len(instance.Spec.ResourceQuota.Items))
|
||||
if err = r.syncResourceQuotas(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync ResourceQuota items")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring additional RoleBindings for owner")
|
||||
if err = r.syncAdditionalRoleBindings(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync additional RoleBindings items")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring RoleBinding for owner")
|
||||
if err = r.ownerRoleBinding(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync owner RoleBinding")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring Namespace count")
|
||||
if err = r.ensureNamespaceCount(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespace count")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Tenant reconciling completed")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
func (r *Manager) updateTenantStatus(tnt *capsulev1beta1.Tenant) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
|
||||
if tnt.IsCordoned() {
|
||||
tnt.Status.State = capsulev1beta1.TenantStateCordoned
|
||||
} else {
|
||||
tnt.Status.State = capsulev1beta1.TenantStateActive
|
||||
}
|
||||
|
||||
return r.Client.Status().Update(context.Background(), tnt)
|
||||
})
|
||||
}
|
||||
controllers/tenant/namespaces.go (new file, 183 lines)
@@ -0,0 +1,183 @@
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
|
||||
// Ensuring all annotations are applied to each Namespace handled by the Tenant.
|
||||
func (r *Manager) syncNamespaces(tenant *capsulev1beta1.Tenant) (err error) {
|
||||
group := new(errgroup.Group)
|
||||
|
||||
for _, item := range tenant.Status.Namespaces {
|
||||
namespace := item
|
||||
|
||||
group.Go(func() error {
|
||||
return r.syncNamespaceMetadata(namespace, tenant)
|
||||
})
|
||||
}
|
||||
|
||||
if err = group.Wait(); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespaces")
|
||||
|
||||
err = fmt.Errorf("cannot sync Namespaces: %s", err.Error())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *Manager) syncNamespaceMetadata(namespace string, tnt *capsulev1beta1.Tenant) (err error) {
|
||||
var res controllerutil.OperationResult
|
||||
|
||||
err = retry.RetryOnConflict(retry.DefaultBackoff, func() (conflictErr error) {
|
||||
ns := &corev1.Namespace{}
|
||||
if conflictErr = r.Client.Get(context.TODO(), types.NamespacedName{Name: namespace}, ns); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
capsuleLabel, _ := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
|
||||
res, conflictErr = controllerutil.CreateOrUpdate(context.TODO(), r.Client, ns, func() error {
|
||||
annotations := make(map[string]string)
|
||||
labels := map[string]string{
|
||||
"name": namespace,
|
||||
capsuleLabel: tnt.GetName(),
|
||||
}
|
||||
|
||||
if tnt.Spec.NamespaceOptions != nil && tnt.Spec.NamespaceOptions.AdditionalMetadata != nil {
|
||||
for k, v := range tnt.Spec.NamespaceOptions.AdditionalMetadata.Annotations {
|
||||
annotations[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if tnt.Spec.NamespaceOptions != nil && tnt.Spec.NamespaceOptions.AdditionalMetadata != nil {
|
||||
for k, v := range tnt.Spec.NamespaceOptions.AdditionalMetadata.Labels {
|
||||
labels[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if tnt.Spec.NodeSelector != nil {
|
||||
var selector []string
|
||||
for k, v := range tnt.Spec.NodeSelector {
|
||||
selector = append(selector, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
annotations["scheduler.alpha.kubernetes.io/node-selector"] = strings.Join(selector, ",")
|
||||
}
|
||||
|
||||
if tnt.Spec.IngressOptions.AllowedClasses != nil {
|
||||
if len(tnt.Spec.IngressOptions.AllowedClasses.Exact) > 0 {
|
||||
annotations[capsulev1beta1.AvailableIngressClassesAnnotation] = strings.Join(tnt.Spec.IngressOptions.AllowedClasses.Exact, ",")
|
||||
}
|
||||
if len(tnt.Spec.IngressOptions.AllowedClasses.Regex) > 0 {
|
||||
annotations[capsulev1beta1.AvailableIngressClassesRegexpAnnotation] = tnt.Spec.IngressOptions.AllowedClasses.Regex
|
||||
}
|
||||
}
|
||||
|
||||
if tnt.Spec.StorageClasses != nil {
|
||||
if len(tnt.Spec.StorageClasses.Exact) > 0 {
|
||||
annotations[capsulev1beta1.AvailableStorageClassesAnnotation] = strings.Join(tnt.Spec.StorageClasses.Exact, ",")
|
||||
}
|
||||
if len(tnt.Spec.StorageClasses.Regex) > 0 {
|
||||
annotations[capsulev1beta1.AvailableStorageClassesRegexpAnnotation] = tnt.Spec.StorageClasses.Regex
|
||||
}
|
||||
}
|
||||
|
||||
if tnt.Spec.ContainerRegistries != nil {
|
||||
if len(tnt.Spec.ContainerRegistries.Exact) > 0 {
|
||||
annotations[capsulev1beta1.AllowedRegistriesAnnotation] = strings.Join(tnt.Spec.ContainerRegistries.Exact, ",")
|
||||
}
|
||||
if len(tnt.Spec.ContainerRegistries.Regex) > 0 {
|
||||
annotations[capsulev1beta1.AllowedRegistriesRegexpAnnotation] = tnt.Spec.ContainerRegistries.Regex
|
||||
}
|
||||
}
|
||||
|
||||
if value, ok := tnt.Annotations[capsulev1beta1.ForbiddenNamespaceLabelsAnnotation]; ok {
|
||||
annotations[capsulev1beta1.ForbiddenNamespaceLabelsAnnotation] = value
|
||||
}
|
||||
|
||||
if value, ok := tnt.Annotations[capsulev1beta1.ForbiddenNamespaceLabelsRegexpAnnotation]; ok {
|
||||
annotations[capsulev1beta1.ForbiddenNamespaceLabelsRegexpAnnotation] = value
|
||||
}
|
||||
|
||||
if value, ok := tnt.Annotations[capsulev1beta1.ForbiddenNamespaceAnnotationsAnnotation]; ok {
|
||||
annotations[capsulev1beta1.ForbiddenNamespaceAnnotationsAnnotation] = value
|
||||
}
|
||||
|
||||
if value, ok := tnt.Annotations[capsulev1beta1.ForbiddenNamespaceAnnotationsRegexpAnnotation]; ok {
|
||||
annotations[capsulev1beta1.ForbiddenNamespaceAnnotationsRegexpAnnotation] = value
|
||||
}
|
||||
|
||||
if ns.Annotations == nil {
|
||||
ns.SetAnnotations(annotations)
|
||||
} else {
|
||||
for k, v := range annotations {
|
||||
ns.Annotations[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if ns.Labels == nil {
|
||||
ns.SetLabels(labels)
|
||||
} else {
|
||||
for k, v := range labels {
|
||||
ns.Labels[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return
|
||||
})
|
||||
|
||||
r.emitEvent(tnt, namespace, res, "Ensuring Namespace metadata", err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (r *Manager) ensureNamespaceCount(tenant *capsulev1beta1.Tenant) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
tenant.Status.Size = uint(len(tenant.Status.Namespaces))
|
||||
|
||||
found := &capsulev1beta1.Tenant{}
|
||||
if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: tenant.GetName()}, found); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
found.Status.Size = tenant.Status.Size
|
||||
|
||||
return r.Client.Status().Update(context.TODO(), found, &client.UpdateOptions{})
|
||||
})
|
||||
}
|
||||
|
||||
func (r *Manager) collectNamespaces(tenant *capsulev1beta1.Tenant) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
|
||||
list := &corev1.NamespaceList{}
|
||||
err = r.Client.List(context.TODO(), list, client.MatchingFieldsSelector{
|
||||
Selector: fields.OneTermEqualSelector(".metadata.ownerReferences[*].capsule", tenant.GetName()),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
_, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, tenant.DeepCopy(), func() error {
|
||||
tenant.AssignNamespaces(list.Items)
|
||||
|
||||
return r.Client.Status().Update(context.TODO(), tenant, &client.UpdateOptions{})
|
||||
})
|
||||
return
|
||||
})
|
||||
}
|
||||
controllers/tenant/networkpolicies.go (new file, 82 lines)
@@ -0,0 +1,82 @@
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
|
||||
// Ensuring all the NetworkPolicies are applied to each Namespace handled by the Tenant.
|
||||
func (r *Manager) syncNetworkPolicies(tenant *capsulev1beta1.Tenant) error {
|
||||
// getting requested NetworkPolicy keys
|
||||
keys := make([]string, 0, len(tenant.Spec.NetworkPolicies.Items))
|
||||
|
||||
for i := range tenant.Spec.NetworkPolicies.Items {
|
||||
keys = append(keys, strconv.Itoa(i))
|
||||
}
|
||||
|
||||
group := new(errgroup.Group)
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
namespace := ns
|
||||
|
||||
group.Go(func() error {
|
||||
return r.syncNetworkPolicy(tenant, namespace, keys)
|
||||
})
|
||||
}
|
||||
|
||||
return group.Wait()
|
||||
}
|
||||
|
||||
func (r *Manager) syncNetworkPolicy(tenant *capsulev1beta1.Tenant, namespace string, keys []string) (err error) {
|
||||
if err = r.pruningResources(namespace, keys, &networkingv1.NetworkPolicy{}); err != nil {
|
||||
return
|
||||
}
|
||||
// getting NetworkPolicy labels for the mutateFn
|
||||
var tenantLabel, networkPolicyLabel string
|
||||
|
||||
if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if networkPolicyLabel, err = capsulev1beta1.GetTypeLabel(&networkingv1.NetworkPolicy{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for i, spec := range tenant.Spec.NetworkPolicies.Items {
|
||||
target := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d", tenant.Name, i),
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
|
||||
var res controllerutil.OperationResult
|
||||
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
|
||||
target.SetLabels(map[string]string{
|
||||
tenantLabel: tenant.Name,
|
||||
networkPolicyLabel: strconv.Itoa(i),
|
||||
})
|
||||
target.Spec = spec
|
||||
|
||||
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring NetworkPolicy %s", target.GetName()), err)
|
||||
|
||||
r.Log.Info("Network Policy sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
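In the same way, syncNetworkPolicy replicates every entry of spec.networkPolicies.items into each Namespace as a NetworkPolicy named capsule-<tenant>-<index>. A sketch of the Tenant fragment, assuming each item is a standard NetworkPolicySpec as described by the CRD excerpts above (selector values are illustrative):

spec:
  networkPolicies:
    items:
      - podSelector: {}                  # required by NetworkPolicySpec; selects all Pods in the Namespace
        policyTypes:
          - Ingress
        ingress:
          - from:
              - namespaceSelector:
                  matchLabels:
                    capsule.clastix.io/tenant: gas   # illustrative: allow traffic from the Tenant's own Namespaces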
controllers/tenant/resourcequotas.go (new file, 251 lines)
@@ -0,0 +1,251 @@
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
|
||||
// When the Resource Budget assigned to a Tenant is Tenant-scoped we have to rely on the ResourceQuota resources to
|
||||
// represent the resource quota for the single Tenant rather than the single Namespace,
|
||||
// so abusing of this API although its Namespaced scope.
|
||||
//
|
||||
// Since a Namespace could take-up all the available resource quota, the Namespace ResourceQuota will be a 1:1 mapping
|
||||
// to the Tenant one: in first time Capsule is going to sum all the analogous ResourceQuota resources on other Tenant
|
||||
// namespaces to check if the Tenant quota has been exceeded or not, reusing the native Kubernetes policy putting the
|
||||
// .Status.Used value as the .Hard value.
|
||||
// This will trigger following reconciliations but that's ok: the mutateFn will re-use the same business logic, letting
|
||||
// the mutateFn along with the CreateOrUpdate to don't perform the update since resources are identical.
|
||||
//
|
||||
// In case of Namespace-scoped Resource Budget, we're just replicating the resources across all registered Namespaces.
|
||||
func (r *Manager) syncResourceQuotas(tenant *capsulev1beta1.Tenant) (err error) {
|
||||
// getting ResourceQuota labels for the mutateFn
|
||||
var tenantLabel, typeLabel string
|
||||
|
||||
if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if typeLabel, err = capsulev1beta1.GetTypeLabel(&corev1.ResourceQuota{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if tenant.Spec.ResourceQuota.Scope == capsulev1beta1.ResourceQuotaScopeTenant {
|
||||
group := new(errgroup.Group)
|
||||
|
||||
for i, q := range tenant.Spec.ResourceQuota.Items {
|
||||
index := i
|
||||
|
||||
resourceQuota := q
|
||||
|
||||
group.Go(func() (scopeErr error) {
|
||||
// Calculating the Resource Budget at Tenant scope just if this is put in place.
|
||||
// Requirement to list ResourceQuota of the current Tenant
|
||||
var tntRequirement *labels.Requirement
|
||||
if tntRequirement, scopeErr = labels.NewRequirement(tenantLabel, selection.Equals, []string{tenant.Name}); scopeErr != nil {
|
||||
r.Log.Error(scopeErr, "Cannot build ResourceQuota Tenant requirement")
|
||||
}
|
||||
// Requirement to list ResourceQuota for the current index
|
||||
var indexRequirement *labels.Requirement
|
||||
if indexRequirement, scopeErr = labels.NewRequirement(typeLabel, selection.Equals, []string{strconv.Itoa(index)}); scopeErr != nil {
|
||||
r.Log.Error(scopeErr, "Cannot build ResourceQuota index requirement")
|
||||
}
|
||||
// Listing all the ResourceQuota according to the said requirements.
|
||||
// These are required since Capsule is going to sum all the used quota to
|
||||
// sum them and get the Tenant one.
|
||||
list := &corev1.ResourceQuotaList{}
|
||||
if scopeErr = r.List(context.TODO(), list, &client.ListOptions{LabelSelector: labels.NewSelector().Add(*tntRequirement).Add(*indexRequirement)}); scopeErr != nil {
|
||||
r.Log.Error(scopeErr, "Cannot list ResourceQuota", "tenantFilter", tntRequirement.String(), "indexFilter", indexRequirement.String())
|
||||
return
|
||||
}
|
||||
// Iterating over all the options declared for the ResourceQuota,
|
||||
// summing all the used quota across different Namespaces to determinate
|
||||
// if we're hitting a Hard quota at Tenant level.
|
||||
// For this case, we're going to block the Quota setting the Hard as the
|
||||
// used one.
|
||||
for name, hardQuota := range resourceQuota.Hard {
|
||||
r.Log.Info("Desired hard " + name.String() + " quota is " + hardQuota.String())
|
||||
|
||||
// Getting the whole usage across all the Tenant Namespaces
|
||||
var quantity resource.Quantity
|
||||
for _, item := range list.Items {
|
||||
quantity.Add(item.Status.Used[name])
|
||||
}
|
||||
r.Log.Info("Computed " + name.String() + " quota for the whole Tenant is " + quantity.String())
|
||||
|
||||
switch quantity.Cmp(resourceQuota.Hard[name]) {
|
||||
case 0:
|
||||
// The Tenant is matching exactly the Quota:
|
||||
// falling through next case since we have to block further
|
||||
// resource allocations.
|
||||
fallthrough
|
||||
case 1:
|
||||
// The Tenant is OverQuota:
|
||||
// updating all the related ResourceQuota with the current
|
||||
// used Quota to block further creations.
|
||||
for item := range list.Items {
|
||||
if _, ok := list.Items[item].Status.Used[name]; ok {
|
||||
list.Items[item].Spec.Hard[name] = list.Items[item].Status.Used[name]
|
||||
} else {
|
||||
um := make(map[corev1.ResourceName]resource.Quantity)
|
||||
um[name] = resource.Quantity{}
|
||||
list.Items[item].Spec.Hard = um
|
||||
}
|
||||
}
|
||||
default:
|
||||
// The Tenant is respecting the Hard quota:
|
||||
// restoring the default one for all the elements,
|
||||
// also for the reconciled one.
|
||||
for item := range list.Items {
|
||||
if list.Items[item].Spec.Hard == nil {
|
||||
list.Items[item].Spec.Hard = map[corev1.ResourceName]resource.Quantity{}
|
||||
}
|
||||
list.Items[item].Spec.Hard[name] = resourceQuota.Hard[name]
|
||||
}
|
||||
}
|
||||
if scopeErr = r.resourceQuotasUpdate(name, quantity, resourceQuota.Hard[name], list.Items...); scopeErr != nil {
|
||||
r.Log.Error(scopeErr, "cannot proceed with outer ResourceQuota")
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
}
|
||||
// Waiting the update of all ResourceQuotas
|
||||
if err = group.Wait(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
// getting requested ResourceQuota keys
|
||||
keys := make([]string, 0, len(tenant.Spec.ResourceQuota.Items))
|
||||
|
||||
for i := range tenant.Spec.ResourceQuota.Items {
|
||||
keys = append(keys, strconv.Itoa(i))
|
||||
}
|
||||
|
||||
group := new(errgroup.Group)
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
namespace := ns
|
||||
|
||||
group.Go(func() error {
|
||||
return r.syncResourceQuota(tenant, namespace, keys)
|
||||
})
|
||||
}
|
||||
|
||||
return group.Wait()
|
||||
}
|
||||
|
||||
func (r *Manager) syncResourceQuota(tenant *capsulev1beta1.Tenant, namespace string, keys []string) (err error) {
|
||||
// getting ResourceQuota labels for the mutateFn
|
||||
var tenantLabel, typeLabel string
|
||||
|
||||
if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if typeLabel, err = capsulev1beta1.GetTypeLabel(&corev1.ResourceQuota{}); err != nil {
|
||||
return err
|
||||
}
|
||||
// Pruning resource of non-requested resources
|
||||
if err = r.pruningResources(namespace, keys, &corev1.ResourceQuota{}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for index, resQuota := range tenant.Spec.ResourceQuota.Items {
|
||||
target := &corev1.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d", tenant.Name, index),
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
|
||||
var res controllerutil.OperationResult
|
||||
err = retry.RetryOnConflict(retry.DefaultBackoff, func() (retryErr error) {
|
||||
res, retryErr = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
|
||||
target.SetLabels(map[string]string{
|
||||
tenantLabel: tenant.Name,
|
||||
typeLabel: strconv.Itoa(index),
|
||||
})
|
||||
target.Spec.Scopes = resQuota.Scopes
|
||||
target.Spec.ScopeSelector = resQuota.ScopeSelector
|
||||
// In case of Namespace scope for the ResourceQuota we can easily apply the bare specification
|
||||
if tenant.Spec.ResourceQuota.Scope == capsulev1beta1.ResourceQuotaScopeNamespace {
|
||||
target.Spec.Hard = resQuota.Hard
|
||||
}
|
||||
|
||||
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
|
||||
})
|
||||
|
||||
return retryErr
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring ResourceQuota %s", target.GetName()), err)
|
||||
|
||||
r.Log.Info("Resource Quota sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Serial ResourceQuota processing is expensive: using Go routines we can speed it up.
|
||||
// In case of multiple errors these are logged properly, returning a generic error since we have to repush back the
|
||||
// reconciliation loop.
|
||||
func (r *Manager) resourceQuotasUpdate(resourceName corev1.ResourceName, actual, limit resource.Quantity, list ...corev1.ResourceQuota) (err error) {
|
||||
group := new(errgroup.Group)
|
||||
|
||||
for _, item := range list {
|
||||
rq := item
|
||||
|
||||
group.Go(func() (err error) {
|
||||
found := &corev1.ResourceQuota{}
|
||||
if err = r.Get(context.TODO(), types.NamespacedName{Namespace: rq.Namespace, Name: rq.Name}, found); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() (retryErr error) {
|
||||
_, retryErr = controllerutil.CreateOrUpdate(context.TODO(), r.Client, found, func() error {
|
||||
// Ensuring annotation map is there to avoid uninitialized map error and
|
||||
// assigning the overall usage
|
||||
if found.Annotations == nil {
|
||||
found.Annotations = make(map[string]string)
|
||||
}
|
||||
found.Labels = rq.Labels
|
||||
found.Annotations[capsulev1beta1.UsedQuotaFor(resourceName)] = actual.String()
|
||||
found.Annotations[capsulev1beta1.HardQuotaFor(resourceName)] = limit.String()
|
||||
// Updating the Resource according to the actual.Cmp result
|
||||
found.Spec.Hard = rq.Spec.Hard
|
||||
return nil
|
||||
})
|
||||
|
||||
return retryErr
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
if err = group.Wait(); err != nil {
|
||||
// We had an error and we mark the whole transaction as failed
|
||||
// to process it another time according to the Tenant controller back-off factor.
|
||||
r.Log.Error(err, "Cannot update outer ResourceQuotas", "resourceName", resourceName.String())
|
||||
err = fmt.Errorf("update of outer ResourceQuota items has failed: %s", err.Error())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
179
controllers/tenant/rolebindings.go
Normal file
@@ -0,0 +1,179 @@
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sync/errgroup"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
"github.com/clastix/capsule/controllers/rbac"
|
||||
)
|
||||
|
||||
// Additional Role Bindings can be used in many ways: applying Pod Security Policies or giving
|
||||
// access to CRDs or specific API groups.
|
||||
func (r *Manager) syncAdditionalRoleBindings(tenant *capsulev1beta1.Tenant) (err error) {
|
||||
// hashing the RoleBinding name due to DNS RFC-1123 applied to Kubernetes labels
|
||||
hashFn := func(binding capsulev1beta1.AdditionalRoleBindingsSpec) string {
|
||||
h := fnv.New64a()
|
||||
|
||||
_, _ = h.Write([]byte(binding.ClusterRoleName))
|
||||
|
||||
for _, sub := range binding.Subjects {
|
||||
_, _ = h.Write([]byte(sub.Kind + sub.Name))
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%x", h.Sum64())
|
||||
}
|
||||
// getting requested Role Binding keys
|
||||
var keys []string
|
||||
for _, i := range tenant.Spec.AdditionalRoleBindings {
|
||||
keys = append(keys, hashFn(i))
|
||||
}
|
||||
|
||||
group := new(errgroup.Group)
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
namespace := ns
|
||||
|
||||
group.Go(func() error {
|
||||
return r.syncAdditionalRoleBinding(tenant, namespace, keys, hashFn)
|
||||
})
|
||||
}
|
||||
|
||||
return group.Wait()
|
||||
}
|
||||
|
||||
func (r *Manager) syncAdditionalRoleBinding(tenant *capsulev1beta1.Tenant, ns string, keys []string, hashFn func(binding capsulev1beta1.AdditionalRoleBindingsSpec) string) (err error) {
|
||||
var tenantLabel, roleBindingLabel string
|
||||
|
||||
if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if roleBindingLabel, err = capsulev1beta1.GetTypeLabel(&rbacv1.RoleBinding{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = r.pruningResources(ns, keys, &rbacv1.RoleBinding{}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for i, roleBinding := range tenant.Spec.AdditionalRoleBindings {
|
||||
roleBindingHashLabel := hashFn(roleBinding)
|
||||
|
||||
target := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d-%s", tenant.Name, i, roleBinding.ClusterRoleName),
|
||||
Namespace: ns,
|
||||
},
|
||||
}
|
||||
|
||||
var res controllerutil.OperationResult
|
||||
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() error {
|
||||
target.ObjectMeta.Labels = map[string]string{
|
||||
tenantLabel: tenant.Name,
|
||||
roleBindingLabel: roleBindingHashLabel,
|
||||
}
|
||||
target.RoleRef = rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: roleBinding.ClusterRoleName,
|
||||
}
|
||||
target.Subjects = roleBinding.Subjects
|
||||
|
||||
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring additional RoleBinding %s", target.GetName()), err)
|
||||
|
||||
if err != nil {
|
||||
r.Log.Error(err, "Cannot sync Additional RoleBinding")
|
||||
}
|
||||
r.Log.Info(fmt.Sprintf("Additional RoleBindings sync result: %s", string(res)), "name", target.Name, "namespace", target.Namespace)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Each Tenant owner needs the admin Role attached to each Namespace, otherwise no actions on it can be performed.
|
||||
// Since RBAC is based on deny all first, some specific actions like editing Capsule resources are going to be blocked
|
||||
// via Dynamic Admission Webhooks.
|
||||
// TODO(prometherion): we could create a capsule:admin role rather than hitting webhooks for each action
|
||||
func (r *Manager) ownerRoleBinding(tenant *capsulev1beta1.Tenant) error {
|
||||
// getting RoleBinding label for the mutateFn
|
||||
var subjects []rbacv1.Subject
|
||||
|
||||
tl, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newLabels := map[string]string{tl: tenant.Name}
|
||||
|
||||
for _, owner := range tenant.Spec.Owners {
|
||||
if owner.Kind == "ServiceAccount" {
|
||||
splitName := strings.Split(owner.Name, ":")
|
||||
subjects = append(subjects, rbacv1.Subject{
|
||||
Kind: owner.Kind.String(),
|
||||
Name: splitName[len(splitName)-1],
|
||||
Namespace: splitName[len(splitName)-2],
|
||||
})
|
||||
} else {
|
||||
subjects = append(subjects, rbacv1.Subject{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: owner.Kind.String(),
|
||||
Name: owner.Name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
list := make(map[types.NamespacedName]rbacv1.RoleRef)
|
||||
|
||||
for _, i := range tenant.Status.Namespaces {
|
||||
list[types.NamespacedName{Namespace: i, Name: "namespace:admin"}] = rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: "admin",
|
||||
}
|
||||
list[types.NamespacedName{Namespace: i, Name: "namespace-deleter"}] = rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: rbac.DeleterRoleName,
|
||||
}
|
||||
}
|
||||
|
||||
for namespacedName, roleRef := range list {
|
||||
target := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespacedName.Name,
|
||||
Namespace: namespacedName.Namespace,
|
||||
},
|
||||
}
|
||||
|
||||
var res controllerutil.OperationResult
|
||||
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
|
||||
target.ObjectMeta.Labels = newLabels
|
||||
target.Subjects = subjects
|
||||
target.RoleRef = roleRef
|
||||
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring Capsule RoleBinding %s", target.GetName()), err)
|
||||
|
||||
r.Log.Info("Role Binding sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
63
controllers/tenant/utils.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package tenant
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
|
||||
// pruningResources is taking care of removing the no more requested sub-resources as LimitRange, ResourceQuota or
|
||||
// NetworkPolicy using the "exists" and "notin" LabelSelector to perform an outer-join removal.
|
||||
func (r *Manager) pruningResources(ns string, keys []string, obj client.Object) (err error) {
|
||||
var capsuleLabel string
|
||||
if capsuleLabel, err = capsulev1beta1.GetTypeLabel(obj); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
selector := labels.NewSelector()
|
||||
|
||||
var exists *labels.Requirement
|
||||
if exists, err = labels.NewRequirement(capsuleLabel, selection.Exists, []string{}); err != nil {
|
||||
return
|
||||
}
|
||||
selector = selector.Add(*exists)
|
||||
|
||||
if len(keys) > 0 {
|
||||
var notIn *labels.Requirement
|
||||
if notIn, err = labels.NewRequirement(capsuleLabel, selection.NotIn, keys); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
selector = selector.Add(*notIn)
|
||||
}
|
||||
|
||||
r.Log.Info("Pruning objects with label selector " + selector.String())
|
||||
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
return r.DeleteAllOf(context.TODO(), obj, &client.DeleteAllOfOptions{
|
||||
ListOptions: client.ListOptions{
|
||||
LabelSelector: selector,
|
||||
Namespace: ns,
|
||||
},
|
||||
DeleteOptions: client.DeleteOptions{},
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func (r *Manager) emitEvent(object runtime.Object, namespace string, res controllerutil.OperationResult, msg string, err error) {
|
||||
var eventType = corev1.EventTypeNormal
|
||||
if err != nil {
|
||||
eventType = corev1.EventTypeWarning
|
||||
res = "Error"
|
||||
}
|
||||
|
||||
r.Recorder.AnnotatedEventf(object, map[string]string{"OperationResult": string(res)}, eventType, namespace, msg)
|
||||
}
|
||||
@@ -1,739 +0,0 @@
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"golang.org/x/sync/errgroup"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/retry"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
"github.com/clastix/capsule/controllers/rbac"
|
||||
)
|
||||
|
||||
// TenantReconciler reconciles a Tenant object
|
||||
type TenantReconciler struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Scheme *runtime.Scheme
|
||||
Recorder record.EventRecorder
|
||||
}
|
||||
|
||||
func (r *TenantReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&capsulev1beta1.Tenant{}).
|
||||
Owns(&corev1.Namespace{}).
|
||||
Owns(&networkingv1.NetworkPolicy{}).
|
||||
Owns(&corev1.LimitRange{}).
|
||||
Owns(&corev1.ResourceQuota{}).
|
||||
Owns(&rbacv1.RoleBinding{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r TenantReconciler) Reconcile(ctx context.Context, request ctrl.Request) (result ctrl.Result, err error) {
|
||||
r.Log = r.Log.WithValues("Request.Name", request.Name)
|
||||
|
||||
// Fetch the Tenant instance
|
||||
instance := &capsulev1beta1.Tenant{}
|
||||
err = r.Get(ctx, request.NamespacedName, instance)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
r.Log.Info("Request object not found, could have been deleted after reconcile request")
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
r.Log.Error(err, "Error reading the object")
|
||||
return
|
||||
}
|
||||
// Ensuring the Tenant Status
|
||||
if err = r.updateTenantStatus(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot update Tenant status")
|
||||
return
|
||||
}
|
||||
|
||||
// Ensuring all namespaces are collected
|
||||
r.Log.Info("Ensuring all Namespaces are collected")
|
||||
if err = r.collectNamespaces(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot collect Namespace resources")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Starting processing of Namespaces", "items", len(instance.Status.Namespaces))
|
||||
if err = r.syncNamespaces(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespace items")
|
||||
return
|
||||
}
|
||||
|
||||
if instance.Spec.NetworkPolicies != nil {
|
||||
r.Log.Info("Starting processing of Network Policies", "items", len(instance.Spec.NetworkPolicies.Items))
|
||||
if err = r.syncNetworkPolicies(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync NetworkPolicy items")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if instance.Spec.LimitRanges != nil {
|
||||
r.Log.Info("Starting processing of Limit Ranges", "items", len(instance.Spec.LimitRanges.Items))
|
||||
if err = r.syncLimitRanges(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync LimitRange items")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if instance.Spec.ResourceQuota != nil {
|
||||
r.Log.Info("Starting processing of Resource Quotas", "items", len(instance.Spec.ResourceQuota.Items))
|
||||
if err = r.syncResourceQuotas(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync ResourceQuota items")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring additional RoleBindings for owner")
|
||||
if err = r.syncAdditionalRoleBindings(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync additional RoleBindings items")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring RoleBinding for owner")
|
||||
if err = r.ownerRoleBinding(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync owner RoleBinding")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring Namespace count")
|
||||
if err = r.ensureNamespaceCount(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespace count")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Tenant reconciling completed")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// pruningResources is taking care of removing the no more requested sub-resources as LimitRange, ResourceQuota or
|
||||
// NetworkPolicy using the "exists" and "notin" LabelSelector to perform an outer-join removal.
|
||||
func (r *TenantReconciler) pruningResources(ns string, keys []string, obj client.Object) error {
|
||||
capsuleLabel, err := capsulev1beta1.GetTypeLabel(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := labels.NewSelector()
|
||||
|
||||
exists, err := labels.NewRequirement(capsuleLabel, selection.Exists, []string{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s = s.Add(*exists)
|
||||
|
||||
if len(keys) > 0 {
|
||||
var notIn *labels.Requirement
|
||||
notIn, err = labels.NewRequirement(capsuleLabel, selection.NotIn, keys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s = s.Add(*notIn)
|
||||
}
|
||||
|
||||
r.Log.Info("Pruning objects with label selector " + s.String())
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
return r.DeleteAllOf(context.TODO(), obj, &client.DeleteAllOfOptions{
|
||||
ListOptions: client.ListOptions{
|
||||
LabelSelector: s,
|
||||
Namespace: ns,
|
||||
},
|
||||
DeleteOptions: client.DeleteOptions{},
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// Serial ResourceQuota processing is expensive: using Go routines we can speed it up.
|
||||
// In case of multiple errors these are logged properly, returning a generic error since we have to repush back the
|
||||
// reconciliation loop.
|
||||
func (r *TenantReconciler) resourceQuotasUpdate(resourceName corev1.ResourceName, actual, limit resource.Quantity, list ...corev1.ResourceQuota) error {
|
||||
g := errgroup.Group{}
|
||||
|
||||
for _, item := range list {
|
||||
rq := item
|
||||
g.Go(func() error {
|
||||
found := &corev1.ResourceQuota{}
|
||||
if err := r.Get(context.TODO(), types.NamespacedName{Namespace: rq.Namespace, Name: rq.Name}, found); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
_, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, found, func() error {
|
||||
// Ensuring annotation map is there to avoid uninitialized map error and
|
||||
// assigning the overall usage
|
||||
if found.Annotations == nil {
|
||||
found.Annotations = make(map[string]string)
|
||||
}
|
||||
found.Labels = rq.Labels
|
||||
found.Annotations[capsulev1beta1.UsedQuotaFor(resourceName)] = actual.String()
|
||||
found.Annotations[capsulev1beta1.HardQuotaFor(resourceName)] = limit.String()
|
||||
// Updating the Resource according to the actual.Cmp result
|
||||
found.Spec.Hard = rq.Spec.Hard
|
||||
return nil
|
||||
})
|
||||
return err
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
var err error
|
||||
if err = g.Wait(); err != nil {
|
||||
// We had an error and we mark the whole transaction as failed
|
||||
// to process it another time according to the Tenant controller back-off factor.
|
||||
r.Log.Error(err, "Cannot update outer ResourceQuotas", "resourceName", resourceName.String())
|
||||
err = fmt.Errorf("update of outer ResourceQuota items has failed: %s", err.Error())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Additional Role Bindings can be used in many ways: applying Pod Security Policies or giving
|
||||
// access to CRDs or specific API groups.
|
||||
func (r *TenantReconciler) syncAdditionalRoleBindings(tenant *capsulev1beta1.Tenant) (err error) {
|
||||
// hashing the RoleBinding name due to DNS RFC-1123 applied to Kubernetes labels
|
||||
hash := func(binding capsulev1beta1.AdditionalRoleBindingsSpec) string {
|
||||
h := fnv.New64a()
|
||||
|
||||
_, _ = h.Write([]byte(binding.ClusterRoleName))
|
||||
|
||||
for _, sub := range binding.Subjects {
|
||||
_, _ = h.Write([]byte(sub.Kind + sub.Name))
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%x", h.Sum64())
|
||||
}
|
||||
// getting requested Role Binding keys
|
||||
var keys []string
|
||||
for _, i := range tenant.Spec.AdditionalRoleBindings {
|
||||
keys = append(keys, hash(i))
|
||||
}
|
||||
|
||||
var tl, ll string
|
||||
tl, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
ll, err = capsulev1beta1.GetTypeLabel(&rbacv1.RoleBinding{})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
if err = r.pruningResources(ns, keys, &rbacv1.RoleBinding{}); err != nil {
|
||||
return err
|
||||
}
|
||||
for i, roleBinding := range tenant.Spec.AdditionalRoleBindings {
|
||||
lv := hash(roleBinding)
|
||||
rb := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d-%s", tenant.Name, i, roleBinding.ClusterRoleName),
|
||||
Namespace: ns,
|
||||
},
|
||||
}
|
||||
var res controllerutil.OperationResult
|
||||
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, rb, func() error {
|
||||
rb.ObjectMeta.Labels = map[string]string{
|
||||
tl: tenant.Name,
|
||||
ll: lv,
|
||||
}
|
||||
rb.RoleRef = rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: roleBinding.ClusterRoleName,
|
||||
}
|
||||
rb.Subjects = roleBinding.Subjects
|
||||
|
||||
return controllerutil.SetControllerReference(tenant, rb, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, rb.GetNamespace(), res, fmt.Sprintf("Ensuring additional RoleBinding %s", rb.GetName()), err)
|
||||
|
||||
if err != nil {
|
||||
r.Log.Error(err, "Cannot sync Additional RoleBinding")
|
||||
}
|
||||
r.Log.Info(fmt.Sprintf("Additional RoleBindings sync result: %s", string(res)), "name", rb.Name, "namespace", rb.Namespace)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// We're relying on the ResourceQuota resource to represent the resource quota for the single Tenant rather than the
|
||||
// single Namespace, so abusing of this API although its Namespaced scope.
|
||||
// Since a Namespace could take-up all the available resource quota, the Namespace ResourceQuota will be a 1:1 mapping
|
||||
// to the Tenant one: in a second time Capsule is going to sum all the analogous ResourceQuota resources on other Tenant
|
||||
// namespaces to check if the Tenant quota has been exceeded or not, reusing the native Kubernetes policy putting the
|
||||
// .Status.Used value as the .Hard value.
|
||||
// This will trigger a following reconciliation but that's ok: the mutateFn will re-use the same business logic, letting
|
||||
// the mutateFn along with the CreateOrUpdate to don't perform the update since resources are identical.
|
||||
func (r *TenantReconciler) syncResourceQuotas(tenant *capsulev1beta1.Tenant) error {
|
||||
// getting requested ResourceQuota keys
|
||||
keys := make([]string, 0, len(tenant.Spec.ResourceQuota.Items))
|
||||
for i := range tenant.Spec.ResourceQuota.Items {
|
||||
keys = append(keys, strconv.Itoa(i))
|
||||
}
|
||||
|
||||
// getting ResourceQuota labels for the mutateFn
|
||||
tenantLabel, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
typeLabel, err := capsulev1beta1.GetTypeLabel(&corev1.ResourceQuota{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
if err := r.pruningResources(ns, keys, &corev1.ResourceQuota{}); err != nil {
|
||||
return err
|
||||
}
|
||||
for i, q := range tenant.Spec.ResourceQuota.Items {
|
||||
target := &corev1.ResourceQuota{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d", tenant.Name, i),
|
||||
Namespace: ns,
|
||||
},
|
||||
}
|
||||
res, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
|
||||
target.SetLabels(map[string]string{
|
||||
tenantLabel: tenant.Name,
|
||||
typeLabel: strconv.Itoa(i),
|
||||
})
|
||||
// Requirement to list ResourceQuota of the current Tenant
|
||||
tr, err := labels.NewRequirement(tenantLabel, selection.Equals, []string{tenant.Name})
|
||||
if err != nil {
|
||||
r.Log.Error(err, "Cannot build ResourceQuota Tenant requirement")
|
||||
}
|
||||
// Requirement to list ResourceQuota for the current index
|
||||
ir, err := labels.NewRequirement(typeLabel, selection.Equals, []string{strconv.Itoa(i)})
|
||||
if err != nil {
|
||||
r.Log.Error(err, "Cannot build ResourceQuota index requirement")
|
||||
}
|
||||
|
||||
// Listing all the ResourceQuota according to the said requirements.
|
||||
// These are required since Capsule is going to sum all the used quota to
|
||||
// sum them and get the Tenant one.
|
||||
rql := &corev1.ResourceQuotaList{}
|
||||
err = r.List(context.TODO(), rql, &client.ListOptions{
|
||||
LabelSelector: labels.NewSelector().Add(*tr).Add(*ir),
|
||||
})
|
||||
if err != nil {
|
||||
r.Log.Error(err, "Cannot list ResourceQuota", "tenantFilter", tr.String(), "indexFilter", ir.String())
|
||||
return err
|
||||
}
|
||||
|
||||
// Iterating over all the options declared for the ResourceQuota,
|
||||
// summing all the used quota across different Namespaces to determinate
|
||||
// if we're hitting a Hard quota at Tenant level.
|
||||
// For this case, we're going to block the Quota setting the Hard as the
|
||||
// used one.
|
||||
for rn, rq := range q.Hard {
|
||||
r.Log.Info("Desired hard " + rn.String() + " quota is " + rq.String())
|
||||
|
||||
// Getting the whole usage across all the Tenant Namespaces
|
||||
var qt resource.Quantity
|
||||
for _, rq := range rql.Items {
|
||||
qt.Add(rq.Status.Used[rn])
|
||||
}
|
||||
r.Log.Info("Computed " + rn.String() + " quota for the whole Tenant is " + qt.String())
|
||||
|
||||
switch qt.Cmp(q.Hard[rn]) {
|
||||
case 0:
|
||||
// The Tenant is matching exactly the Quota:
|
||||
// falling through next case since we have to block further
|
||||
// resource allocations.
|
||||
fallthrough
|
||||
case 1:
|
||||
// The Tenant is OverQuota:
|
||||
// updating all the related ResourceQuota with the current
|
||||
// used Quota to block further creations.
|
||||
for i := range rql.Items {
|
||||
if _, ok := rql.Items[i].Status.Used[rn]; ok {
|
||||
rql.Items[i].Spec.Hard[rn] = rql.Items[i].Status.Used[rn]
|
||||
} else {
|
||||
um := make(map[corev1.ResourceName]resource.Quantity)
|
||||
um[rn] = resource.Quantity{}
|
||||
rql.Items[i].Spec.Hard = um
|
||||
}
|
||||
}
|
||||
default:
|
||||
// The Tenant is respecting the Hard quota:
|
||||
// restoring the default one for all the elements,
|
||||
// also for the reconciled one.
|
||||
for i := range rql.Items {
|
||||
if rql.Items[i].Spec.Hard == nil {
|
||||
rql.Items[i].Spec.Hard = map[corev1.ResourceName]resource.Quantity{}
|
||||
}
|
||||
rql.Items[i].Spec.Hard[rn] = q.Hard[rn]
|
||||
}
|
||||
target.Spec = q
|
||||
}
|
||||
if err := r.resourceQuotasUpdate(rn, qt, q.Hard[rn], rql.Items...); err != nil {
|
||||
r.Log.Error(err, "cannot proceed with outer ResourceQuota")
|
||||
return err
|
||||
}
|
||||
}
|
||||
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring ResourceQuota %s", target.GetName()), err)
|
||||
|
||||
r.Log.Info("Resource Quota sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ensuring all the LimitRange are applied to each Namespace handled by the Tenant.
|
||||
func (r *TenantReconciler) syncLimitRanges(tenant *capsulev1beta1.Tenant) error {
|
||||
// getting requested LimitRange keys
|
||||
keys := make([]string, 0, len(tenant.Spec.LimitRanges.Items))
|
||||
for i := range tenant.Spec.LimitRanges.Items {
|
||||
keys = append(keys, strconv.Itoa(i))
|
||||
}
|
||||
|
||||
// getting LimitRange labels for the mutateFn
|
||||
tl, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ll, err := capsulev1beta1.GetTypeLabel(&corev1.LimitRange{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
if err := r.pruningResources(ns, keys, &corev1.LimitRange{}); err != nil {
|
||||
return err
|
||||
}
|
||||
for i, spec := range tenant.Spec.LimitRanges.Items {
|
||||
t := &corev1.LimitRange{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d", tenant.Name, i),
|
||||
Namespace: ns,
|
||||
},
|
||||
}
|
||||
res, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, t, func() (err error) {
|
||||
t.ObjectMeta.Labels = map[string]string{
|
||||
tl: tenant.Name,
|
||||
ll: strconv.Itoa(i),
|
||||
}
|
||||
t.Spec = spec
|
||||
return controllerutil.SetControllerReference(tenant, t, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, t.GetNamespace(), res, fmt.Sprintf("Ensuring LimitRange %s", t.GetName()), err)
|
||||
|
||||
r.Log.Info("LimitRange sync result: "+string(res), "name", t.Name, "namespace", t.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *TenantReconciler) syncNamespaceMetadata(namespace string, tnt *capsulev1beta1.Tenant) (err error) {
|
||||
var res controllerutil.OperationResult
|
||||
|
||||
err = retry.RetryOnConflict(retry.DefaultBackoff, func() (conflictErr error) {
|
||||
ns := &corev1.Namespace{}
|
||||
if conflictErr = r.Client.Get(context.TODO(), types.NamespacedName{Name: namespace}, ns); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
res, conflictErr = controllerutil.CreateOrUpdate(context.TODO(), r.Client, ns, func() error {
|
||||
a := make(map[string]string)
|
||||
|
||||
if tnt.Spec.NamespacesMetadata != nil {
|
||||
for k, v := range tnt.Spec.NamespacesMetadata.AdditionalAnnotations {
|
||||
a[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if tnt.Spec.NodeSelector != nil {
|
||||
var selector []string
|
||||
for k, v := range tnt.Spec.NodeSelector {
|
||||
selector = append(selector, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
a["scheduler.alpha.kubernetes.io/node-selector"] = strings.Join(selector, ",")
|
||||
}
|
||||
|
||||
if tnt.Spec.IngressClasses != nil {
|
||||
if len(tnt.Spec.IngressClasses.Exact) > 0 {
|
||||
a[capsulev1beta1.AvailableIngressClassesAnnotation] = strings.Join(tnt.Spec.IngressClasses.Exact, ",")
|
||||
}
|
||||
if len(tnt.Spec.IngressClasses.Regex) > 0 {
|
||||
a[capsulev1beta1.AvailableIngressClassesRegexpAnnotation] = tnt.Spec.IngressClasses.Regex
|
||||
}
|
||||
}
|
||||
|
||||
if tnt.Spec.StorageClasses != nil {
|
||||
if len(tnt.Spec.StorageClasses.Exact) > 0 {
|
||||
a[capsulev1beta1.AvailableStorageClassesAnnotation] = strings.Join(tnt.Spec.StorageClasses.Exact, ",")
|
||||
}
|
||||
if len(tnt.Spec.StorageClasses.Regex) > 0 {
|
||||
a[capsulev1beta1.AvailableStorageClassesRegexpAnnotation] = tnt.Spec.StorageClasses.Regex
|
||||
}
|
||||
}
|
||||
|
||||
if tnt.Spec.ContainerRegistries != nil {
|
||||
if len(tnt.Spec.ContainerRegistries.Exact) > 0 {
|
||||
a[capsulev1beta1.AllowedRegistriesAnnotation] = strings.Join(tnt.Spec.ContainerRegistries.Exact, ",")
|
||||
}
|
||||
if len(tnt.Spec.ContainerRegistries.Regex) > 0 {
|
||||
a[capsulev1beta1.AllowedRegistriesRegexpAnnotation] = tnt.Spec.ContainerRegistries.Regex
|
||||
}
|
||||
}
|
||||
|
||||
ns.SetAnnotations(a)
|
||||
|
||||
l := make(map[string]string)
|
||||
|
||||
if tnt.Spec.NamespacesMetadata != nil {
|
||||
for k, v := range tnt.Spec.NamespacesMetadata.AdditionalLabels {
|
||||
l[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
l["name"] = namespace
|
||||
capsuleLabel, _ := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
l[capsuleLabel] = tnt.GetName()
|
||||
ns.SetLabels(l)
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return
|
||||
})
|
||||
|
||||
r.emitEvent(tnt, namespace, res, "Ensuring Namespace metadata", err)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Ensuring all annotations are applied to each Namespace handled by the Tenant.
|
||||
func (r *TenantReconciler) syncNamespaces(tenant *capsulev1beta1.Tenant) (err error) {
|
||||
group := errgroup.Group{}
|
||||
|
||||
for _, item := range tenant.Status.Namespaces {
|
||||
namespace := item
|
||||
group.Go(func() error {
|
||||
return r.syncNamespaceMetadata(namespace, tenant)
|
||||
})
|
||||
}
|
||||
|
||||
if err = group.Wait(); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespaces")
|
||||
err = fmt.Errorf("cannot sync Namespaces: %s", err.Error())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Ensuring all the NetworkPolicies are applied to each Namespace handled by the Tenant.
|
||||
func (r *TenantReconciler) syncNetworkPolicies(tenant *capsulev1beta1.Tenant) error {
|
||||
// getting requested NetworkPolicy keys
|
||||
keys := make([]string, 0, len(tenant.Spec.NetworkPolicies.Items))
|
||||
for i := range tenant.Spec.NetworkPolicies.Items {
|
||||
keys = append(keys, strconv.Itoa(i))
|
||||
}
|
||||
|
||||
// getting NetworkPolicy labels for the mutateFn
|
||||
tl, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nl, err := capsulev1beta1.GetTypeLabel(&networkingv1.NetworkPolicy{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
if err := r.pruningResources(ns, keys, &networkingv1.NetworkPolicy{}); err != nil {
|
||||
return err
|
||||
}
|
||||
for i, spec := range tenant.Spec.NetworkPolicies.Items {
|
||||
t := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d", tenant.Name, i),
|
||||
Namespace: ns,
|
||||
},
|
||||
}
|
||||
res, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, t, func() (err error) {
|
||||
t.SetLabels(map[string]string{
|
||||
tl: tenant.Name,
|
||||
nl: strconv.Itoa(i),
|
||||
})
|
||||
t.Spec = spec
|
||||
|
||||
return controllerutil.SetControllerReference(tenant, t, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, t.GetNamespace(), res, fmt.Sprintf("Ensuring NetworkPolicy %s", t.GetName()), err)
|
||||
|
||||
r.Log.Info("Network Policy sync result: "+string(res), "name", t.Name, "namespace", t.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Each Tenant owner needs the admin Role attached to each Namespace, otherwise no actions on it can be performed.
|
||||
// Since RBAC is based on deny all first, some specific actions like editing Capsule resources are going to be blocked
|
||||
// via Dynamic Admission Webhooks.
|
||||
// TODO(prometherion): we could create a capsule:admin role rather than hitting webhooks for each action
|
||||
func (r *TenantReconciler) ownerRoleBinding(tenant *capsulev1beta1.Tenant) error {
|
||||
// getting RoleBinding label for the mutateFn
|
||||
var subjects []rbacv1.Subject
|
||||
|
||||
tl, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l := map[string]string{tl: tenant.Name}
|
||||
|
||||
for _, owner := range tenant.Spec.Owners {
|
||||
if owner.Kind == "ServiceAccount" {
|
||||
splitName := strings.Split(owner.Name, ":")
|
||||
subjects = append(subjects, rbacv1.Subject{
|
||||
Kind: owner.Kind.String(),
|
||||
Name: splitName[len(splitName)-1],
|
||||
Namespace: splitName[len(splitName)-2],
|
||||
})
|
||||
} else {
|
||||
subjects = append(subjects, rbacv1.Subject{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: owner.Kind.String(),
|
||||
Name: owner.Name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
rbl := make(map[types.NamespacedName]rbacv1.RoleRef)
|
||||
for _, i := range tenant.Status.Namespaces {
|
||||
rbl[types.NamespacedName{Namespace: i, Name: "namespace:admin"}] = rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: "admin",
|
||||
}
|
||||
rbl[types.NamespacedName{Namespace: i, Name: "namespace-deleter"}] = rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: rbac.DeleterRoleName,
|
||||
}
|
||||
}
|
||||
|
||||
for nn, rr := range rbl {
|
||||
target := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: nn.Name,
|
||||
Namespace: nn.Namespace,
|
||||
},
|
||||
}
|
||||
|
||||
var res controllerutil.OperationResult
|
||||
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
|
||||
target.ObjectMeta.Labels = l
|
||||
target.Subjects = subjects
|
||||
target.RoleRef = rr
|
||||
return controllerutil.SetControllerReference(tenant, target, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring Capsule RoleBinding %s", target.GetName()), err)
|
||||
|
||||
r.Log.Info("Role Binding sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *TenantReconciler) ensureNamespaceCount(tenant *capsulev1beta1.Tenant) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
tenant.Status.Size = uint(len(tenant.Status.Namespaces))
|
||||
found := &capsulev1beta1.Tenant{}
|
||||
if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: tenant.GetName()}, found); err != nil {
|
||||
return err
|
||||
}
|
||||
found.Status.Size = tenant.Status.Size
|
||||
return r.Client.Status().Update(context.TODO(), found, &client.UpdateOptions{})
|
||||
})
|
||||
}
|
||||
|
||||
func (r *TenantReconciler) emitEvent(object runtime.Object, namespace string, res controllerutil.OperationResult, msg string, err error) {
|
||||
var eventType = corev1.EventTypeNormal
|
||||
if err != nil {
|
||||
eventType = corev1.EventTypeWarning
|
||||
res = "Error"
|
||||
}
|
||||
|
||||
r.Recorder.AnnotatedEventf(object, map[string]string{"OperationResult": string(res)}, eventType, namespace, msg)
|
||||
}
|
||||
|
||||
func (r *TenantReconciler) collectNamespaces(tenant *capsulev1beta1.Tenant) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
|
||||
nl := &corev1.NamespaceList{}
|
||||
err = r.Client.List(context.TODO(), nl, client.MatchingFieldsSelector{
|
||||
Selector: fields.OneTermEqualSelector(".metadata.ownerReferences[*].capsule", tenant.GetName()),
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, tenant.DeepCopy(), func() error {
|
||||
tenant.AssignNamespaces(nl.Items)
|
||||
return r.Client.Status().Update(context.TODO(), tenant, &client.UpdateOptions{})
|
||||
})
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
func (r *TenantReconciler) updateTenantStatus(tnt *capsulev1beta1.Tenant) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
|
||||
if tnt.IsCordoned() {
|
||||
tnt.Status.State = capsulev1beta1.TenantStateCordoned
|
||||
} else {
|
||||
tnt.Status.State = capsulev1beta1.TenantStateActive
|
||||
}
|
||||
|
||||
return r.Client.Status().Update(context.Background(), tnt)
|
||||
})
|
||||
}
|
||||
BIN
docs/assets/datasource.png
Executable file
|
After Width: | Height: | Size: 4.5 KiB |
BIN
docs/assets/manager-controllers.png
Executable file
|
After Width: | Height: | Size: 28 KiB |
BIN
docs/assets/prometheus_targets.png
Executable file
|
After Width: | Height: | Size: 30 KiB |
BIN
docs/assets/rest-client-error-rate.png
Executable file
|
After Width: | Height: | Size: 63 KiB |
BIN
docs/assets/rest-client-latency.png
Executable file
|
After Width: | Height: | Size: 14 KiB |
BIN
docs/assets/saturation.png
Executable file
|
After Width: | Height: | Size: 79 KiB |
BIN
docs/assets/upload_json.png
Executable file
|
After Width: | Height: | Size: 22 KiB |
BIN
docs/assets/webhook-error-rate.png
Executable file
|
After Width: | Height: | Size: 131 KiB |
BIN
docs/assets/webhook-latency.png
Executable file
|
After Width: | Height: | Size: 55 KiB |
BIN
docs/assets/workqueue.png
Executable file
|
After Width: | Height: | Size: 57 KiB |
@@ -5,40 +5,4 @@ Currently, the Capsule ecosystem comprises the following:
|
||||
|
||||
* [Capsule Operator](./operator/overview.md)
|
||||
* [Capsule Proxy](./proxy/overview.md)
|
||||
* [Capsule Lens extension](lens-extension/overview.md) Coming soon!
|
||||
|
||||
## Documents structure
|
||||
```command
|
||||
docs
|
||||
├── index.md
|
||||
├── lens-extension
|
||||
│ └── overview.md
|
||||
├── proxy
|
||||
│ ├── overview.md
|
||||
│ ├── sidecar.md
|
||||
│ └── standalone.md
|
||||
└── operator
|
||||
├── contributing.md
|
||||
├── getting-started.md
|
||||
├── monitoring.md
|
||||
├── overview.md
|
||||
├── references.md
|
||||
└── use-cases
|
||||
├── create-namespaces.md
|
||||
├── custom-resources.md
|
||||
├── images-registries.md
|
||||
├── ingress-classes.md
|
||||
├── ingress-hostnames.md
|
||||
├── multiple-tenants.md
|
||||
├── network-policies.md
|
||||
├── node-ports.md
|
||||
├── nodes-pool.md
|
||||
├── onboarding.md
|
||||
├── overview.md
|
||||
├── permissions.md
|
||||
├── pod-priority-class.md
|
||||
├── pod-security-policies.md
|
||||
├── resources-quota-limits.md
|
||||
├── storage-classes.md
|
||||
└── taint-namespaces.md
|
||||
```
|
||||
* [Capsule Lens extension](./lens-extension/overview.md)
|
||||
|
||||
@@ -1,2 +1,11 @@
|
||||
# Capsule extension for Mirantis Lens
|
||||
Coming soon.
|
||||
# Capsule extension for Lens
|
||||
With the Capsule extension for [Lens](https://github.com/lensapp/lens), a cluster administrator can easily manage all the resources of a Kubernetes cluster from a single pane of glass, including all the Tenants created through the Capsule Operator.
|
||||
|
||||
## Features
|
||||
Capsule extension for Lens provides these capabilities:
|
||||
|
||||
- List all tenants
|
||||
- See tenant details and change them through the embedded Lens editor
|
||||
- Check Resources Quota and Budget at both the tenant and namespace level
|
||||
|
||||
Please, see the [README](https://github.com/clastix/capsule-lens-extension) for details about the installation of the Capsule Lens Extension.
|
||||
|
||||
@@ -27,7 +27,7 @@ the CRDs manifests, as well the deep copy functions, require _Operator SDK_:
|
||||
the binary has to be installed into your `PATH`.
|
||||
|
||||
### Installing Kubebuilder
|
||||
With the latest release of OperatorSDK there's a more tightly integration with
|
||||
With the latest release of OperatorSDK there's a tighter integration with
|
||||
Kubebuilder and its opinionated testing suite: ensure to download the latest
|
||||
binaries available from the _Releases_ GitHub page and place them into the
|
||||
`/usr/local/kubebuilder/bin` folder, ensuring this is also in your `PATH`.
|
||||
@@ -97,7 +97,7 @@ You can check if Capsule is running tailing the logs:
|
||||
```
|
||||
|
||||
Since Capsule is built using _OperatorSDK_, logging is handled by the zap
|
||||
module: log verbosity of the Capsule controller can be increased by passing
|
||||
module: log verbosity of the Capsule controller can be increased passing
|
||||
the `--zap-log-level` option with a value from `1` to `10` or the
|
||||
[basic keywords](https://godoc.org/go.uber.org/zap/zapcore#Level) although
|
||||
it is suggested to use the `--zap-devel` flag to also get stack traces.
|
||||
@@ -124,7 +124,7 @@ deployment.apps/capsule-controller-manager scaled
|
||||
> This is mandatory since Capsule uses Leader Election
|
||||
|
||||
#### Providing TLS certificate for webhooks
|
||||
Next step is to replicate the same environment Capsule is expecting in the Pod,
|
||||
The next step is to replicate the same environment Capsule is expecting in the Pod,
|
||||
it means creating a fake certificate to handle HTTP requests.
|
||||
|
||||
``` bash
|
||||
@@ -133,8 +133,8 @@ kubectl -n capsule-system get secret capsule-tls -o jsonpath='{.data.tls\.crt}'
|
||||
kubectl -n capsule-system get secret capsule-tls -o jsonpath='{.data.tls\.key}' | base64 -d > /tmp/k8s-webhook-server/serving-certs/tls.key
|
||||
```
|
||||
|
||||
> We're using the certificates generate upon first installation of Capsule:
|
||||
> it means the Secret will be populated at first start-up.
|
||||
> We're using the certificates generated upon the first installation of Capsule:
|
||||
> it means the Secret will be populated at the first start-up.
|
||||
> If you plan to run it locally since the beginning, it means you will require
|
||||
> to provide a self-signed certificate in the said directory.
|
||||
|
||||
@@ -167,9 +167,10 @@ _ValidatingWebhookConfiguration_ too, adding the said `ngrok` URL as base for
|
||||
each defined webhook, as following:
|
||||
|
||||
```diff
|
||||
apiVersion: admissionregistration.k8s.io/v1beta1
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: MutatingWebhookConfiguration
|
||||
metadata:
|
||||
|
||||
name: capsule-mutating-webhook-configuration
|
||||
webhooks:
|
||||
- name: owner.namespace.capsule.clastix.io
|
||||
@@ -241,8 +242,10 @@ A commit description is welcomed to explain more the changes: just ensure
|
||||
to put a blank line and an arbitrary number of maximum 72 characters long
|
||||
lines, at most one blank line between them.
|
||||
|
||||
Please, split changes into several and documented small commits: this will help
|
||||
us to perform a better review.
|
||||
Please, split changes into several small, documented commits: this will help us to perform a better review. Commits must follow the Conventional Commits Specification, a lightweight convention on top of commit messages. It provides an easy set of rules for creating an explicit commit history, which makes it easier to write automated tools on top of it. This convention dovetails with Semantic Versioning by describing the features, fixes, and breaking changes made in commit messages. See the [Conventional Commits Specification](https://www.conventionalcommits.org) to learn more.
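For instance, a commit message following this convention might look like the one below; the type, scope, and description are purely illustrative and not taken from the project history:

```
feat(docs): describe the Conventional Commits requirement

Explain which commit types are accepted and point contributors to the
specification, keeping each body line under 72 characters.
```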
|
||||
|
||||
> In case of errors or need of changes to previous commits,
|
||||
> fix them squashing to make changes atomic.
|
||||
|
||||
### Miscellanea
|
||||
Please, add a single newline at the end of every file, as per the current coding style.
|
||||
|
||||
@@ -6,54 +6,47 @@ Make sure you have access to a Kubernetes cluster as administrator.
|
||||
|
||||
There are two ways to install Capsule:
|
||||
|
||||
* Use the Helm Chart available [here](https://github.com/clastix/capsule/tree/master/charts/capsule)
|
||||
* Use [`kustomize`](https://github.com/kubernetes-sigs/kustomize)
|
||||
* Use the [single YAML file installer](https://raw.githubusercontent.com/clastix/capsule/master/config/install.yaml)
|
||||
* Use the [Capsule Helm Chart](https://github.com/clastix/capsule/blob/master/charts/capsule/README.md)
|
||||
|
||||
### Install with kustomize
|
||||
Ensure you have `kubectl` and `kustomize` installed in your `PATH`.
|
||||
|
||||
Clone this repository and move to the repo folder:
|
||||
### Install with the single YAML file installer
|
||||
Ensure you have `kubectl` installed in your `PATH`. Clone this repository and move to the repo folder:
|
||||
|
||||
```
|
||||
$ git clone https://github.com/clastix/capsule
|
||||
$ cd capsule
|
||||
$ make deploy
|
||||
$ kubectl apply -f https://raw.githubusercontent.com/clastix/capsule/master/config/install.yaml
|
||||
```
|
||||
|
||||
It will install the Capsule controller in a dedicated namespace `capsule-system`.
|
||||
|
||||
# Create your first Tenant
|
||||
In Capsule, a _Tenant_ is an abstraction to group togheter multiple namespaces in a single entity within a set of bundaries defined by the Cluster Administrator. The tenant is then assigned to a user or group of users who is called _Tenant Owner_.
|
||||
### Install with Helm Chart
|
||||
Please, refer to the instructions reported in the Capsule Helm Chart [README](https://github.com/clastix/capsule/blob/master/charts/capsule/README.md).
|
||||
|
||||
Capsule defines a Tenant as Custom Resource with cluster scope:
|
||||
# Create your first Tenant
|
||||
In Capsule, a _Tenant_ is an abstraction to group multiple namespaces in a single entity within a set of boundaries defined by the Cluster Administrator. The tenant is then assigned to a user or a group of users, called the _Tenant Owner_.
|
||||
|
||||
Capsule defines a Tenant as a Custom Resource with cluster scope.
|
||||
|
||||
Create the tenant as cluster admin:
|
||||
|
||||
```yaml
|
||||
cat <<EOF > oil_tenant.yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kubectl create -f - << EOF
|
||||
apiVersion: capsule.clastix.io/v1beta1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
owners:
|
||||
- name: alice
|
||||
kind: User
|
||||
namespaceQuota: 3
|
||||
EOF
|
||||
```
|
||||
|
||||
Apply as cluster admin:
|
||||
|
||||
```
|
||||
$ kubectl apply -f oil_tenant.yaml
|
||||
tenant.capsule.clastix.io/oil created
|
||||
```
|
||||
|
||||
You can check the tenant just created as cluster admin
|
||||
You can check the tenant just created
|
||||
|
||||
```
|
||||
$ kubectl get tenants
|
||||
NAME NAMESPACE QUOTA NAMESPACE COUNT OWNER NAME OWNER KIND NODE SELECTOR AGE
|
||||
oil 3 0 alice User 1m
|
||||
NAME STATE NAMESPACE QUOTA NAMESPACE COUNT NODE SELECTOR AGE
|
||||
oil Active 0 10s
|
||||
```
|
||||
|
||||
## Tenant owners
|
||||
@@ -65,27 +58,21 @@ Assignment to a group depends on the authentication strategy in your cluster.
|
||||
|
||||
For example, if you are using `capsule.clastix.io`, users authenticated through a _X.509_ certificate must have `capsule.clastix.io` as _Organization_: `-subj "/CN=${USER}/O=capsule.clastix.io"`
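For instance, a minimal sketch of generating such a certificate request with `openssl` follows; the user name and file names are illustrative, and the CSR still has to be approved and signed by the cluster CA, which is what the `hack/create-user.sh` script mentioned below automates:

```bash
# Illustrative only: the Organization (O) places the user in the
# capsule.clastix.io group expected by Capsule.
USER=alice
openssl genrsa -out ${USER}.key 2048
openssl req -new -key ${USER}.key -out ${USER}.csr \
  -subj "/CN=${USER}/O=capsule.clastix.io"
```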
|
||||
|
||||
Users authenticated through an _OIDC token_ must have
|
||||
Users authenticated through an _OIDC token_ must have in their token:
|
||||
|
||||
```json
|
||||
...
|
||||
"users_groups": [
|
||||
"capsule.clastix.io",
|
||||
"other_group"
|
||||
"capsule.clastix.io",
|
||||
"other_group"
|
||||
]
|
||||
```
|
||||
|
||||
in their token.
|
||||
|
||||
The [hack/create-user.sh](../../hack/create-user.sh) can help you set up a dummy `kubeconfig` for the `alice` user acting as owner of a tenant called `oil`
|
||||
|
||||
```bash
|
||||
./hack/create-user.sh alice oil
|
||||
creating certs in TMPDIR /tmp/tmp.4CLgpuime3
|
||||
Generating RSA private key, 2048 bit long modulus (2 primes)
|
||||
............+++++
|
||||
........................+++++
|
||||
e is 65537 (0x010001)
|
||||
...
|
||||
certificatesigningrequest.certificates.k8s.io/alice-oil created
|
||||
certificatesigningrequest.certificates.k8s.io/alice-oil approved
|
||||
kubeconfig file is: alice-oil.kubeconfig
|
||||
@@ -112,7 +99,7 @@ $ kubectl -n oil-development run nginx --image=docker.io/nginx
|
||||
$ kubectl -n oil-development get pods
|
||||
```
|
||||
|
||||
but limited to only your own namespaces:
|
||||
but limited to only your namespaces:
|
||||
|
||||
```
|
||||
$ kubectl -n kube-system get pods
|
||||
@@ -120,4 +107,4 @@ Error from server (Forbidden): pods is forbidden: User "alice" cannot list resou
|
||||
```
|
||||
|
||||
# What’s next
|
||||
The Tenant Owners have full administrative permissions limited to only the namespaces in the assigned tenant. However, their permissions can be controlled by the Cluster Admin by setting rules and policies on the assigned tenant. See the [use cases](./use-cases/overview.md) page for more getting more cool things you can do with Capsule.
|
||||
The Tenant Owners have full administrative permissions limited to the namespaces in the assigned tenant. However, their permissions can be controlled by the Cluster Admin by setting rules and policies on the assigned tenant. See the [use cases](./use-cases/overview.md) page for more cool things you can do with Capsule.
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# Capsule with Amazon EKS
|
||||
|
||||
This is an example how to install Amazon EKS cluster and one user
|
||||
manged by capsule.
|
||||
# Capsule on AWS EKS
|
||||
This is an example of how to install an AWS EKS cluster and one user
|
||||
managed by Capsule.
|
||||
|
||||
It is based on [Using IAM Groups to manage Kubernetes access](https://www.eksworkshop.com/beginner/091_iam-groups/intro/)
|
||||
|
||||
@@ -115,7 +114,7 @@ EOF
|
||||
|
||||
----
|
||||
|
||||
Export "admin" kubeconfig to be able to install capsule:
|
||||
Export "admin" kubeconfig to be able to install Capsule:
|
||||
|
||||
```bash
|
||||
export KUBECONFIG=kubeconfig.conf
|
||||
@@ -131,7 +130,7 @@ helm upgrade --install --version 0.0.19 --namespace capsule-system --create-name
|
||||
Use the default Tenant example:
|
||||
|
||||
```bash
|
||||
kubectl apply -f https://raw.githubusercontent.com/clastix/capsule/master/config/samples/capsule_v1alpha1_tenant.yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/clastix/capsule/master/config/samples/capsule_v1beta1_tenant.yaml
|
||||
```
|
||||
|
||||
Based on the tenant configuration above the user `alice` should be able
|
||||
16
docs/operator/managed-kubernetes/overview.md
Normal file
@@ -0,0 +1,16 @@
|
||||
# Capsule over Managed Kubernetes
|
||||
Capsule Operator can be easily installed on a Managed Kubernetes Service. Since in these services you do not have access to the Kubernetes API Server, you should check the following prerequisites with your service provider (a quick client-side check is sketched after the list):
|
||||
|
||||
- the default `cluster-admin` ClusterRole is accessible
|
||||
- the following Admission Webhooks are enabled on the API Server:
|
||||
- PodNodeSelector
|
||||
- LimitRanger
|
||||
- ResourceQuota
|
||||
- MutatingAdmissionWebhook
|
||||
- ValidatingAdmissionWebhook
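A quick client-side sanity check for the first point is sketched here; it only verifies that the `cluster-admin` ClusterRole exists and that your current credentials can use it, while the admission plugins list still has to be confirmed with the provider:

```bash
# Assumes your kubeconfig already points at the managed cluster.
kubectl get clusterrole cluster-admin
kubectl auth can-i '*' '*' --all-namespaces
```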
|
||||
|
||||
* [AWS EKS](./aws-eks.md)
|
||||
* CoAKS - Capsule over Azure Kubernetes Service
|
||||
* Google Cloud GKE
|
||||
* IBM Cloud
|
||||
* OVH
|
||||
@@ -1,2 +1,181 @@
|
||||
# Monitoring Capsule
|
||||
Coming soon.
|
||||
|
||||
The Capsule dashboard allows you to track the health and performance of the Capsule manager and tenants, with particular attention to resource saturation, server responses, and latencies.
|
||||
|
||||
## Requirements
|
||||
|
||||
### Prometheus
|
||||
|
||||
Prometheus is an open-source monitoring system and time series database; it is based on a multi-dimensional data model and uses PromQL, a powerful query language, to leverage it.
|
||||
|
||||
- Minimum version: 1.0.0
|
||||
|
||||
### Grafana
|
||||
|
||||
Grafana is an open-source monitoring solution that offers a flexible way to generate visuals and configure dashboards.
|
||||
|
||||
- Minimum version: 7.5.5
|
||||
|
||||
To quickly deploy this monitoring stack, consider installing the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator).
|
||||
|
||||
---
|
||||
|
||||
## Quick Start
|
||||
|
||||
The Capsule Helm [charts](https://github.com/clastix/capsule/tree/master/charts/capsule) allow you to automatically create the minimum Kubernetes resources needed for the proper functioning of the dashboard:
|
||||
|
||||
* ServiceMonitor
|
||||
* Role
|
||||
* RoleBinding
|
||||
|
||||
N.B: we assume that a ServiceAccount resource has already been created so it can easily interact with the Prometheus API.
|
||||
|
||||
### Helm install
|
||||
|
||||
During Capsule installation, set the `serviceMonitor` fields as follows:
|
||||
|
||||
```yaml
|
||||
serviceMonitor:
|
||||
enabled: true
|
||||
[...]
|
||||
serviceAccount:
|
||||
name: <prometheus-sa>
|
||||
namespace: <prometheus-sa-namespace>
|
||||
```
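Equivalently, the same fields can be set on the command line. The sketch below assumes the chart has been added as `clastix/capsule` and keeps the placeholder ServiceAccount values, so adjust them to your Prometheus installation:

```bash
# Placeholder values: replace the ServiceAccount name and namespace with
# the ones used by your Prometheus deployment.
helm upgrade --install capsule clastix/capsule \
  --namespace capsule-system --create-namespace \
  --set serviceMonitor.enabled=true \
  --set serviceMonitor.serviceAccount.name=<prometheus-sa> \
  --set serviceMonitor.serviceAccount.namespace=<prometheus-sa-namespace>
```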
|
||||
Take a look at the Helm charts [README.md](https://github.com/clastix/capsule/blob/master/charts/capsule/README.md#customize-the-installation) file for further customization.
|
||||
|
||||
### Check Service Monitor
|
||||
|
||||
Verify that the service monitor is working correctly through the Prometheus "targets" page:
|
||||
|
||||

|
||||
|
||||
### Deploy dashboard
|
||||
|
||||
Simply upload the [dashboard.json](https://github.com/clastix/capsule/blob/master/config/grafana/dashboard.json) file to Grafana through _Create_ -> _Import_,
|
||||
making sure to select the correct Prometheus data source:
|
||||
|
||||

|
||||
|
||||
## In-depth view
|
||||
|
||||
### Features
|
||||
* [Manager controllers](https://github.com/clastix/capsule/blob/master/docs/operator/monitoring.md#manager-controllers)
|
||||
* [Webhook error rate](https://github.com/clastix/capsule/blob/master/docs/operator/monitoring.md#webhook-error-rate)
|
||||
* [Webhook latency](https://github.com/clastix/capsule/blob/master/docs/operator/monitoring.md#webhook-latency)
|
||||
* [REST client latency](https://github.com/clastix/capsule/blob/master/docs/operator/monitoring.md#rest-client-latency)
|
||||
* [REST client error rate](https://github.com/clastix/capsule/blob/master/docs/operator/monitoring.md#rest-client-error-rate)
|
||||
* [Saturation](https://github.com/clastix/capsule/blob/master/docs/operator/monitoring.md#saturation)
|
||||
* [Workqueue](https://github.com/clastix/capsule/blob/master/docs/operator/monitoring.md#workqueue)
|
||||
|
||||
---
|
||||
|
||||
#### Manager controllers
|
||||
|
||||

|
||||
|
||||
##### Description
|
||||
|
||||
This section provides information about the medium time delay between manager client input, side effects, and new state determination (reconciliation).
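As an illustration, a panel like this can be backed by a query over the standard controller-runtime metrics; a sketch follows (metric names are an assumption based on controller-runtime defaults, not taken from the dashboard itself):

```
# Average reconciliation time per controller over the last 5 minutes
sum(rate(controller_runtime_reconcile_time_seconds_sum[5m])) by (controller)
  /
sum(rate(controller_runtime_reconcile_time_seconds_count[5m])) by (controller)
```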
##### Dependent variables and available values

* Controller name
  - capsuleconfiguration
  - clusterrole
  - clusterrolebinding
  - endpoints
  - endpointslice
  - secret
  - service
  - tenant

#### Webhook error rate

##### Description

This section provides information about webhook request responses, focusing mainly on server-side errors.

##### Dependent variables and available values

* Webhook
  - cordoning
  - ingresses
  - namespace-owner-reference
  - namespaces
  - networkpolicies
  - persistentvolumeclaims
  - pods
  - services
  - tenants

#### Webhook latency

##### Description

This section provides information about the average time delay between webhook trigger, side effects, and data written on etcd.

##### Dependent variables and available values

* Webhook
  - cordoning
  - ingresses
  - namespace-owner-reference
  - namespaces
  - networkpolicies
  - persistentvolumeclaims
  - pods
  - services
  - tenants

#### REST client latency

##### Description

This section provides information about the average time delay between all the calls done by the controller and the API Server.
Data display may depend on the REST client verb considered and on the available REST client URLs, so your mileage may vary.

##### Dependent variables and available values

* REST client URL
* REST client verb
  - GET
  - PUT
  - POST
  - PATCH
  - DELETE

#### REST client error rate

##### Description

This section provides information about the total number of REST client requests per unit time, grouped by response code.

#### Saturation

##### Description

This section provides information about resources, giving a detailed picture of the system’s state and the amount of requested work per active controller.

#### Workqueue

##### Description

This section provides information about "actions" in the queue, particularly:
- Workqueue latency: time to complete a series of actions in the queue;
- Workqueue rate: number of actions per unit time;
- Workqueue depth: number of pending actions waiting in the queue.
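These panels map naturally onto the standard client-go workqueue metrics exposed by the manager; a sketch of the kind of queries involved (metric names are assumptions based on client-go defaults):

```
# Workqueue depth: pending actions per queue
sum(workqueue_depth) by (name)

# Workqueue rate: additions per second over the last 5 minutes
sum(rate(workqueue_adds_total[5m])) by (name)

# Workqueue latency: 99th percentile time spent waiting in the queue
histogram_quantile(0.99, sum(rate(workqueue_queue_duration_seconds_bucket[5m])) by (name, le))
```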
@@ -0,0 +1,77 @@

# Allow self-service management of Network Policies

**Profile Applicability:** L2

**Type:** Behavioral

**Category:** Self-Service Operations

**Description:** Tenants should be able to perform self-service operations by creating their own network policies in their namespaces.

**Rationale:** Enables self-service management of network policies.

**Audit:**

As cluster admin, create a tenant

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  networkPolicies:
    items:
    - ingress:
      - from:
        - namespaceSelector:
            matchLabels:
              capsule.clastix.io/tenant: oil
      podSelector: {}
      policyTypes:
      - Egress
      - Ingress
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, retrieve the networkpolicies resources in the tenant namespace

```bash
kubectl --kubeconfig alice get networkpolicies
NAME            POD-SELECTOR   AGE
capsule-oil-0   <none>         7m5s
```

As tenant owner, check the permissions to manage networkpolicies for each verb

```bash
kubectl --kubeconfig alice auth can-i get networkpolicies
kubectl --kubeconfig alice auth can-i create networkpolicies
kubectl --kubeconfig alice auth can-i update networkpolicies
kubectl --kubeconfig alice auth can-i patch networkpolicies
kubectl --kubeconfig alice auth can-i delete networkpolicies
kubectl --kubeconfig alice auth can-i deletecollection networkpolicies
```

Each command must return 'yes'
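As an optional functional check (a sketch, not part of the original benchmark steps), the tenant owner can also create an additional network policy in the namespace, for example one allowing DNS egress only:

```yaml
kubectl --kubeconfig alice create -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-dns-egress
  namespace: oil-production
spec:
  podSelector: {}
  policyTypes:
  - Egress
  egress:
  - ports:
    - protocol: UDP
      port: 53
EOF
```

The creation must succeed, confirming self-service management of network policies.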
**Cleanup:**

As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
```

@@ -0,0 +1,58 @@

# Allow self-service management of Role Bindings

**Profile Applicability:** L2

**Type:** Behavioral

**Category:** Self-Service Operations

**Description:** Tenants should be able to perform self-service operations by creating their own role bindings in their namespaces.

**Rationale:** Enables self-service management of role bindings.

**Audit:**

As cluster admin, create a tenant

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, check the permissions to manage rolebindings for each verb

```bash
kubectl --kubeconfig alice auth can-i get rolebindings
kubectl --kubeconfig alice auth can-i create rolebindings
kubectl --kubeconfig alice auth can-i update rolebindings
kubectl --kubeconfig alice auth can-i patch rolebindings
kubectl --kubeconfig alice auth can-i delete rolebindings
kubectl --kubeconfig alice auth can-i deletecollection rolebindings
```

Each command must return 'yes'

**Cleanup:**

As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
```
58
docs/operator/mtb/allow-self-service-management-of-roles.md
Normal file
@@ -0,0 +1,58 @@

# Allow self-service management of Roles

**Profile Applicability:** L2

**Type:** Behavioral

**Category:** Self-Service Operations

**Description:** Tenants should be able to perform self-service operations by creating their own roles in their namespaces.

**Rationale:** Enables self-service management of roles.

**Audit:**

As cluster admin, create a tenant

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, check for permissions to manage roles for each verb

```bash
kubectl --kubeconfig alice auth can-i get roles
kubectl --kubeconfig alice auth can-i create roles
kubectl --kubeconfig alice auth can-i update roles
kubectl --kubeconfig alice auth can-i patch roles
kubectl --kubeconfig alice auth can-i delete roles
kubectl --kubeconfig alice auth can-i deletecollection roles
```

Each command must return 'yes'
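As an optional functional check (a sketch, not part of the original benchmark steps), the tenant owner can create a namespaced Role as well:

```yaml
kubectl --kubeconfig alice create -f - <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pod-reader
  namespace: oil-production
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "list", "watch"]
EOF
```

The creation must succeed, confirming self-service management of roles.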
**Cleanup:**

As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
```

113
docs/operator/mtb/block-access-to-cluster-resources.md
Normal file
@@ -0,0 +1,113 @@

# Block access to cluster resources

**Profile Applicability:** L1

**Type:** Configuration Check

**Category:** Control Plane Isolation

**Description:** Tenants should not be able to view, edit, create, or delete cluster (non-namespaced) resources such as Node, ClusterRole, ClusterRoleBinding, etc.

**Rationale:** Access controls should be configured for tenants so that a tenant cannot list, create, modify, or delete cluster resources.

**Audit:**

As cluster admin, create a tenant

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
EOF

./create-user.sh alice oil
```

As cluster admin, run the following command to retrieve the list of non-namespaced resources

```bash
kubectl --kubeconfig cluster-admin api-resources --namespaced=false
```

For each non-namespaced resource, and for each verb (get, list, create, update, patch, watch, delete, and deletecollection), issue the following command:

```bash
kubectl --kubeconfig alice auth can-i <verb> <resource>
```

Each command must return `no`
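To avoid running the check by hand, a small loop can automate it (a sketch that simply wraps the commands above):

```bash
# Iterate over every non-namespaced resource and every verb,
# printing any combination the tenant owner is unexpectedly allowed to use
for resource in $(kubectl --kubeconfig cluster-admin api-resources --namespaced=false -o name); do
  for verb in get list create update patch watch delete deletecollection; do
    if [ "$(kubectl --kubeconfig alice auth can-i "$verb" "$resource")" = "yes" ]; then
      echo "unexpected: $verb $resource"
    fi
  done
done
```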
**Exception:**

Each command should return `no`, but for a few resources it does not:

```bash
kubectl --kubeconfig alice auth can-i create selfsubjectaccessreviews
yes
kubectl --kubeconfig alice auth can-i create selfsubjectrulesreviews
yes
kubectl --kubeconfig alice auth can-i create namespaces
yes
```

Any Kubernetes user can create `SelfSubjectAccessReview` and `SelfSubjectRulesReview` resources to check whether they are allowed to perform an action, so the first two exceptions are not an issue.

```bash
kubectl --anyuser auth can-i --list
Resources                                       Non-Resource URLs   Resource Names   Verbs
selfsubjectaccessreviews.authorization.k8s.io   []                  []               [create]
selfsubjectrulesreviews.authorization.k8s.io    []                  []               [create]
                                                [/api/*]            []               [get]
                                                [/api]              []               [get]
                                                [/apis/*]           []               [get]
                                                [/apis]             []               [get]
                                                [/healthz]          []               [get]
                                                [/healthz]          []               [get]
                                                [/livez]            []               [get]
                                                [/livez]            []               [get]
                                                [/openapi/*]        []               [get]
                                                [/openapi]          []               [get]
                                                [/readyz]           []               [get]
                                                [/readyz]           []               [get]
                                                [/version/]         []               [get]
                                                [/version/]         []               [get]
                                                [/version]          []               [get]
                                                [/version]          []               [get]
```

To enable namespace self-service provisioning, Capsule intentionally gives permissions to create namespaces to all users belonging to the Capsule group:

```bash
kubectl describe clusterrolebindings capsule-namespace-provisioner
Name:         capsule-namespace-provisioner
Labels:       <none>
Annotations:  <none>
Role:
  Kind:  ClusterRole
  Name:  capsule-namespace-provisioner
Subjects:
  Kind   Name                Namespace
  ----   ----                ---------
  Group  capsule.clastix.io

kubectl describe clusterrole capsule-namespace-provisioner
Name:         capsule-namespace-provisioner
Labels:       <none>
Annotations:  <none>
PolicyRule:
  Resources   Non-Resource URLs  Resource Names  Verbs
  ---------   -----------------  --------------  -----
  namespaces  []                 []              [create]
```

Capsule controls self-service namespace creation by limiting the number of namespaces the user can create through the `tenant.spec.namespaceQuota` option.

**Cleanup:**

As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
```

155
docs/operator/mtb/block-access-to-multitenant-resources.md
Normal file
@@ -0,0 +1,155 @@
# Block access to multitenant resources

**Profile Applicability:** L1

**Type:** Behavioral

**Category:** Tenant Isolation

**Description:** Each tenant namespace may contain resources set up by the cluster administrator for multi-tenancy, such as role bindings and network policies. Tenants should not be allowed to modify the namespaced resources created by the cluster administrator for multi-tenancy. However, for some resources such as network policies, tenants can configure additional instances of the resource for their workloads.

**Rationale:** Tenants can escalate privileges and impact other tenants if they can delete or modify required multi-tenancy resources such as namespace resource quotas or the default network policy.

**Audit:**

As cluster admin, create a tenant

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  networkPolicies:
    items:
    - podSelector: {}
      policyTypes:
      - Ingress
      - Egress
    - egress:
      - to:
        - namespaceSelector:
            matchLabels:
              capsule.clastix.io/tenant: oil
      ingress:
      - from:
        - namespaceSelector:
            matchLabels:
              capsule.clastix.io/tenant: oil
      podSelector: {}
      policyTypes:
      - Egress
      - Ingress
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, retrieve the networkpolicies resources in the tenant namespace

```bash
kubectl --kubeconfig alice get networkpolicies
NAME            POD-SELECTOR   AGE
capsule-oil-0   <none>         7m5s
capsule-oil-1   <none>         7m5s
```

As tenant owner, try to modify or delete one of the networkpolicies

```bash
kubectl --kubeconfig alice delete networkpolicies capsule-oil-0
```

You should receive an error message denying the edit/delete request

```bash
Error from server (Forbidden): networkpolicies.networking.k8s.io "capsule-oil-0" is forbidden:
User "oil" cannot delete resource "networkpolicies" in API group "networking.k8s.io" in the namespace "oil-production"
```

As tenant owner, you can create an additional networkpolicy inside the namespace

```yaml
kubectl create -f - << EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: hijacking
  namespace: oil-production
spec:
  egress:
  - to:
    - ipBlock:
        cidr: 0.0.0.0/0
  podSelector: {}
  policyTypes:
  - Egress
EOF
```

However, due to the additive nature of networkpolicies, the `DENY ALL` policy set by the cluster admin prevents hijacking.

As tenant owner, list the RBAC permissions set by Capsule

```bash
kubectl --kubeconfig alice get rolebindings
NAME                ROLE                                    AGE
namespace-deleter   ClusterRole/capsule-namespace-deleter   11h
namespace:admin     ClusterRole/admin                       11h
```

As tenant owner, try to change/delete the rolebinding in order to escalate permissions

```bash
kubectl --kubeconfig alice edit/delete rolebinding namespace:admin
```

The rolebinding is immediately recreated by Capsule:

```
kubectl --kubeconfig alice get rolebindings
NAME                ROLE                                    AGE
namespace-deleter   ClusterRole/capsule-namespace-deleter   11h
namespace:admin     ClusterRole/admin                       2s
```

However, the tenant owner can create and assign permissions inside the namespace she owns

```yaml
kubectl create -f - << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
  name: oil-robot:admin
  namespace: oil-production
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: admin
subjects:
- kind: ServiceAccount
  name: default
  namespace: oil-production
EOF
```

**Cleanup:**

As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
```

97
docs/operator/mtb/block-access-to-other-tenant-resources.md
Normal file
@@ -0,0 +1,97 @@
# Block access to other tenant resources

**Profile Applicability:** L1

**Type:** Behavioral

**Category:** Tenant Isolation

**Description:** Each tenant has its own set of resources, such as namespaces, service accounts, secrets, pods, services, etc. Tenants should not be allowed to access each other's resources.

**Rationale:** A tenant's resources must not be accessible by other tenants.

**Audit:**

As cluster admin, create a couple of tenants

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
EOF

./create-user.sh alice oil
```

and

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: gas
spec:
  owners:
  - kind: User
    name: joe
EOF

./create-user.sh joe gas
```

As `oil` tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As `gas` tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig joe create ns gas-production
kubectl --kubeconfig joe config set-context --current --namespace gas-production
```

As `oil` tenant owner, try to retrieve the resources in the `gas` tenant namespaces

```bash
kubectl --kubeconfig alice get serviceaccounts --namespace gas-production
```

You must receive an error message:

```
Error from server (Forbidden): serviceaccount is forbidden:
User "oil" cannot list resource "serviceaccounts" in API group "" in the namespace "gas-production"
```

As `gas` tenant owner, try to retrieve the resources in the `oil` tenant namespaces

```bash
kubectl --kubeconfig joe get serviceaccounts --namespace oil-production
```

You must receive an error message:

```
Error from server (Forbidden): serviceaccount is forbidden:
User "joe" cannot list resource "serviceaccounts" in API group "" in the namespace "oil-production"
```

**Cleanup:**

As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenants oil gas
```

121
docs/operator/mtb/block-add-capabilities.md
Normal file
@@ -0,0 +1,121 @@
# Block add capabilities

**Profile Applicability:** L1

**Type:** Behavioral Check

**Category:** Control Plane Isolation

**Description:** Control Linux capabilities.

**Rationale:** Linux allows defining fine-grained permissions using capabilities. With Kubernetes, it is possible to add capabilities for pods that escalate the level of kernel access and allow other potentially dangerous behaviors.

**Audit:**

As cluster admin, define a `PodSecurityPolicy` with `allowedCapabilities` and map the policy to a tenant:

```yaml
kubectl create -f - << EOF
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: tenant
spec:
  privileged: false
  # Required to prevent escalations to root.
  allowPrivilegeEscalation: false
  # The default set of capabilities is implicitly allowed.
  # The empty set means that no additional capabilities may be added beyond the default set.
  allowedCapabilities: []
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
EOF
```

> Note: make sure the `PodSecurityPolicy` Admission Control is enabled on the API Server: `--enable-admission-plugins=PodSecurityPolicy`

Then create a ClusterRole granting the use of the above PodSecurityPolicy

```yaml
kubectl create -f - << EOF
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tenant:psp
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  resourceNames: ['tenant']
  verbs: ['use']
EOF
```

And assign it to the tenant

```yaml
kubectl apply -f - << EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  additionalRoleBindings:
  - clusterRoleName: tenant:psp
    subjects:
    - kind: "Group"
      apiGroup: "rbac.authorization.k8s.io"
      name: "system:authenticated"
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, create a pod and verify that new capabilities cannot be added in the tenant namespaces

```yaml
kubectl --kubeconfig alice apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-settime-cap
  namespace:
  labels:
spec:
  containers:
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep", "3600"]
    securityContext:
      capabilities:
        add:
        - SYS_TIME
EOF
```

The pod must be blocked by the `PodSecurityPolicy`.

**Cleanup:**

As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
```

69
docs/operator/mtb/block-modification-of-resource-quotas.md
Normal file
@@ -0,0 +1,69 @@
# Block modification of resource quotas

**Profile Applicability:** L1

**Type:** Behavioral Check

**Category:** Tenant Isolation

**Description:** Tenants should not be able to modify the resource quotas defined in their namespaces.

**Rationale:** Resource quotas must be configured for isolation and fairness between tenants. Tenants should not be able to modify existing resource quotas as they may exhaust cluster resources and impact other tenants.

**Audit:**

As cluster admin, create a tenant

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  resourceQuotas:
    items:
    - hard:
        limits.cpu: "8"
        limits.memory: 16Gi
        requests.cpu: "8"
        requests.memory: 16Gi
    - hard:
        pods: "10"
        services: "50"
    - hard:
        requests.storage: 100Gi
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, check the permissions to modify/delete the quota in the tenant namespace:

```bash
kubectl --kubeconfig alice auth can-i create quota
kubectl --kubeconfig alice auth can-i update quota
kubectl --kubeconfig alice auth can-i patch quota
kubectl --kubeconfig alice auth can-i delete quota
kubectl --kubeconfig alice auth can-i deletecollection quota
```

Each command must return 'no'
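Read access, on the other hand, is expected to work: as a quick sketch (not part of the original benchmark steps), the tenant owner can still inspect the quotas enforced by Capsule in the namespace:

```bash
kubectl --kubeconfig alice get resourcequotas
```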
**Cleanup:**

As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
```

@@ -0,0 +1,107 @@

# Block network access across tenant namespaces

**Profile Applicability:** L1

**Type:** Behavioral

**Category:** Tenant Isolation

**Description:** Block network traffic among namespaces from different tenants.

**Rationale:** Tenants cannot access services and pods in another tenant's namespaces.

**Audit:**

As cluster admin, create a couple of tenants

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  networkPolicies:
    items:
    - ingress:
      - from:
        - namespaceSelector:
            matchLabels:
              capsule.clastix.io/tenant: oil
      podSelector: {}
      policyTypes:
      - Ingress
EOF

./create-user.sh alice oil
```

and

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: gas
spec:
  owners:
  - kind: User
    name: joe
  networkPolicies:
    items:
    - ingress:
      - from:
        - namespaceSelector:
            matchLabels:
              capsule.clastix.io/tenant: gas
      podSelector: {}
      policyTypes:
      - Ingress
EOF

./create-user.sh joe gas
```

As `oil` tenant owner, run the following commands to create a namespace and resources in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
kubectl --kubeconfig alice run webserver --image nginx:latest
kubectl --kubeconfig alice expose pod webserver --port 80
```

As `gas` tenant owner, run the following commands to create a namespace and resources in the given tenant

```bash
kubectl --kubeconfig joe create ns gas-production
kubectl --kubeconfig joe config set-context --current --namespace gas-production
kubectl --kubeconfig joe run webserver --image nginx:latest
kubectl --kubeconfig joe expose pod webserver --port 80
```

As `oil` tenant owner, verify you can access the service in the `oil` tenant namespace but not in the `gas` tenant namespace

```bash
kubectl --kubeconfig alice exec webserver -- curl http://webserver.oil-production.svc.cluster.local
kubectl --kubeconfig alice exec webserver -- curl http://webserver.gas-production.svc.cluster.local
```

Vice versa, as `gas` tenant owner, verify you can access the service in the `gas` tenant namespace but not in the `oil` tenant namespace

```bash
kubectl --kubeconfig joe exec webserver -- curl http://webserver.gas-production.svc.cluster.local
kubectl --kubeconfig joe exec webserver -- curl http://webserver.oil-production.svc.cluster.local
```

**Cleanup:**

As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenants oil gas
```

115
docs/operator/mtb/block-privilege-escalation.md
Normal file
@@ -0,0 +1,115 @@
# Block privilege escalation

**Profile Applicability:** L1

**Type:** Behavioral Check

**Category:** Control Plane Isolation

**Description:** Control container permissions.

**Rationale:** The security setting `allowPrivilegeEscalation` allows a process to gain more privileges than its parent process. Processes in tenant containers should not be allowed to gain additional privileges.

**Audit:**

As cluster admin, define a `PodSecurityPolicy` that sets `allowPrivilegeEscalation=false` and map the policy to a tenant:

```yaml
kubectl create -f - << EOF
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: tenant
spec:
  privileged: false
  # Required to prevent escalations to root.
  allowPrivilegeEscalation: false
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
EOF
```

> Note: make sure the `PodSecurityPolicy` Admission Control is enabled on the API Server: `--enable-admission-plugins=PodSecurityPolicy`

Then create a ClusterRole granting the use of the above PodSecurityPolicy

```yaml
kubectl create -f - << EOF
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tenant:psp
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  resourceNames: ['tenant']
  verbs: ['use']
EOF
```

And assign it to the tenant

```yaml
kubectl apply -f - << EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  additionalRoleBindings:
  - clusterRoleName: tenant:psp
    subjects:
    - kind: "Group"
      apiGroup: "rbac.authorization.k8s.io"
      name: "system:authenticated"
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, create a pod or container that sets `allowPrivilegeEscalation=true` in its `securityContext`.

```yaml
kubectl --kubeconfig alice apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
  name: pod-privileged-mode
  namespace: oil-production
  labels:
spec:
  containers:
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep", "3600"]
    securityContext:
      allowPrivilegeEscalation: true
EOF
```

The pod must be blocked by the `PodSecurityPolicy`.

**Cleanup:**

As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
```

116
docs/operator/mtb/block-privileged-containers.md
Normal file
@@ -0,0 +1,116 @@
# Block privileged containers

**Profile Applicability:** L1

**Type:** Behavioral Check

**Category:** Control Plane Isolation

**Description:** Control container permissions.

**Rationale:** By default a container is not allowed to access any devices on the host, but a "privileged" container can access all devices on the host. A process within a privileged container can also get unrestricted host access. Hence, tenants should not be allowed to run privileged containers.

**Audit:**

As cluster admin, define a `PodSecurityPolicy` that sets `privileged=false` and map the policy to a tenant:

```yaml
kubectl create -f - << EOF
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: tenant
spec:
  privileged: false
  # Required to prevent escalations to root.
  allowPrivilegeEscalation: false
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
EOF
```

> Note: make sure the `PodSecurityPolicy` Admission Control is enabled on the API Server: `--enable-admission-plugins=PodSecurityPolicy`

Then create a ClusterRole granting the use of the above PodSecurityPolicy

```yaml
kubectl create -f - << EOF
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tenant:psp
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  resourceNames: ['tenant']
  verbs: ['use']
EOF
```

And assign it to the tenant

```yaml
kubectl apply -f - << EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  additionalRoleBindings:
  - clusterRoleName: tenant:psp
    subjects:
    - kind: "Group"
      apiGroup: "rbac.authorization.k8s.io"
      name: "system:authenticated"
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, create a pod or container that sets `privileged=true` in its `securityContext`.

```yaml
kubectl --kubeconfig alice apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
  name: pod-privileged-mode
  namespace:
  labels:
spec:
  containers:
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep", "3600"]
    securityContext:
      privileged: true
EOF
```

The pod must be blocked by the `PodSecurityPolicy`.

**Cleanup:**

As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
```
@@ -0,0 +1,40 @@

# Block use of existing PVs

**Profile Applicability:** L1

**Type:** Configuration Check

**Category:** Data Isolation

**Description:** Prevent tenants from mounting existing persistent volumes.

**Rationale:** Tenants have to be assured that their Persistent Volumes cannot be reclaimed by other tenants.

**Audit:**

As cluster admin, create a tenant

```yaml
kubectl create -f - << EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
EOF

./create-user.sh alice oil
```

As tenant owner, check if you can access the persistent volumes

```bash
kubectl --kubeconfig alice auth can-i get persistentvolumes
kubectl --kubeconfig alice auth can-i list persistentvolumes
kubectl --kubeconfig alice auth can-i watch persistentvolumes
```

All the requests must return 'no'.