Mirror of https://github.com/projectcapsule/capsule.git (synced 2026-03-02 01:30:17 +00:00)

Compare commits: 95 commits, v0.1.0-rc3...v0.1.0-rc6
Commits (SHA1 only; the author and date columns were not captured by this mirror):

a2fda44110, 06330cf992, 1ec9936158, 694b519af8, 0b34f04291, a702ef2af2, 04d91af9f5, 8949be7497,
df08c9e63e, 07daffd669, 3a42b90221, 09277e9f3d, 47794c0cf8, e24394f329, 01053d5deb, b749e34547,
82480f3afd, 88a9c242a4, 651c62ff4a, dcb8b784d5, 7a698633d7, 894ea5016b, e4e3283b90, 007f0083c2,
bc6fc920d3, 01b511b509, 6223b1c297, d5158f06be, 047f4a0ff7, 71cdb45925, 9182895811, 2eceb0935a,
8ead555743, 57bf3d1c1b, bb58e90f5d, f8fa87a998, b3658b7bfc, 54d0201161, 44ffe0ddf5, 491ab71842,
4e9dbf8690, 34614015a0, 737fb26e39, b56015922f, ddb9ffd79e, cae65c9f84, befcf65bdd, e1d98334a2,
848c6d99c2, bd12068397, 4604e44c37, 31863b53af, 7a055fcb9f, 29ab5ca64a, c52f7844db, 9244122d42,
f883e7b662, 2f5f31b678, e7ef9642ad, 34f73af5c4, 18912a002b, d43ad2f9f8, 9a595877ce, c0d4aab582,
6761fb93dc, bf9e0f6b10, f937942c49, 89d7f301c6, 2a6ff09340, 35f48107fc, 7aa62b6f1d, 58645f39bb,
0e55823a0c, ba690480a7, faa2306a30, c1448c82e9, 776a56b5bc, e4883bb737, e70afb5e77, ee7af18f98,
ac7de3bf88, 8883b15aa9, e23132c820, bec59a585e, 9c649ac7eb, 3455aed503, ad1edf57ac, d64dcb5a44,
76d7697703, 96f4f31c17, c3f9dfe652, 502e9a556f, 6f208a6e0e, 1fb52003d5, 98e1640d9b
.github/workflows/ci.yml (vendored, 21 changes)
@@ -23,22 +23,15 @@ jobs:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- name: Cache Go modules
uses: actions/cache@v1
env:
cache-name: go-mod
with:
path: |
~/go/pkg/mod
/home/runner/work/capsule/capsule
key: ${{ runner.os }}-build-${{ env.cache-name }}
restore-keys: |
${{ runner.os }}-build-
${{ runner.os }}-
- run: make manifests
- name: Checking if manifests are disaligned
fetch-depth: 0
- uses: actions/setup-go@v2
with:
go-version: '^1.16'
- run: make installer
- name: Checking if YAML installer file is not aligned
run: if [[ $(git diff | wc -l) -gt 0 ]]; then echo ">>> Untracked generated files have not been committed" && git --no-pager diff && exit 1; fi
- name: Checking if manifests generated untracked files
- name: Checking if YAML installer generated untracked files
run: test -z "$(git ls-files --others --exclude-standard 2> /dev/null)"
- name: Checking if source code is not formatted
run: test -z "$(git diff 2> /dev/null)"
.github/workflows/docker-ci.yml (vendored, 50 changes)
@@ -10,12 +10,27 @@ jobs:
runs-on: ubuntu-20.04
steps:

-
name: Checkout
- name: Checkout
uses: actions/checkout@v2

-
name: Docker meta
- name: Generate build-args
id: build-args
run: |
# Declare vars for internal use
VERSION=$(git describe --abbrev=0 --tags)
GIT_HEAD_COMMIT=$(git rev-parse --short HEAD)
GIT_TAG_COMMIT=$(git rev-parse --short $VERSION)
GIT_MODIFIED_1=$(git diff $GIT_HEAD_COMMIT $GIT_TAG_COMMIT --quiet && echo "" || echo ".dev")
GIT_MODIFIED_2=$(git diff --quiet && echo "" || echo ".dirty")
# Export to GH_ENV
echo "GIT_LAST_TAG=$VERSION" >> $GITHUB_ENV
echo "GIT_HEAD_COMMIT=$GIT_HEAD_COMMIT" >> $GITHUB_ENV
echo "GIT_TAG_COMMIT=$GIT_TAG_COMMIT" >> $GITHUB_ENV
echo "GIT_MODIFIED=$(echo "$GIT_MODIFIED_1""$GIT_MODIFIED_2")" >> $GITHUB_ENV
echo "GIT_REPO=$(git config --get remote.origin.url)" >> $GITHUB_ENV
echo "BUILD_DATE=$(git log -1 --format="%at" | xargs -I{} date -d @{} +%Y-%m-%dT%H:%M:%S)" >> $GITHUB_ENV

- name: Docker meta
id: meta
uses: docker/metadata-action@v3
with:
@@ -26,22 +41,19 @@ jobs:
flavor: |
latest=false

-
name: Set up QEMU
- name: Set up QEMU
id: qemu
uses: docker/setup-qemu-action@v1
with:
platforms: arm64,arm

-
name: Set up Docker Buildx
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
with:
install: true
uses: docker/setup-buildx-action@v1

-
name: Inspect builder
- name: Inspect builder
run: |
echo "Name: ${{ steps.buildx.outputs.name }}"
echo "Endpoint: ${{ steps.buildx.outputs.endpoint }}"
@@ -49,16 +61,14 @@ jobs:
echo "Flags: ${{ steps.buildx.outputs.flags }}"
echo "Platforms: ${{ steps.buildx.outputs.platforms }}"

-
name: Login to quay.io Container Registry
- name: Login to quay.io Container Registry
uses: docker/login-action@v1
with:
registry: quay.io
username: ${{ github.repository_owner }}+github
password: ${{ secrets.BOT_QUAY_IO }}

-
name: Build and push
- name: Build and push
id: build-release
uses: docker/build-push-action@v2
with:
@@ -67,7 +77,13 @@ jobs:
platforms: linux/amd64,linux/arm64,linux/arm
push: true
tags: ${{ steps.meta.outputs.tags }}
build-args: |
GIT_HEAD_COMMIT=${{ env.GIT_HEAD_COMMIT }}
GIT_TAG_COMMIT=${{ env.GIT_TAG_COMMIT }}
GIT_REPO=${{ env.GIT_REPO }}
GIT_LAST_TAG=${{ env.GIT_LAST_TAG }}
GIT_MODIFIED=${{ env.GIT_MODIFIED }}
BUILD_DATE=${{ env.BUILD_DATE }}

-
name: Image digest
- name: Image digest
run: echo ${{ steps.build-release.outputs.digest }}
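The `Generate build-args` step above computes version metadata (last tag, head commit, a `.dev`/`.dirty` suffix) and exports it for the Docker build. A minimal sketch of how a Go binary might consume such build args via `-ldflags`; the variable names and `main`-package placement here are illustrative assumptions, not taken from the Capsule source:

```go
// version.go (illustrative): values injected at build time, e.g.
//   go build -ldflags "-X main.GitLastTag=$GIT_LAST_TAG -X main.GitHeadCommit=$GIT_HEAD_COMMIT -X main.GitModified=$GIT_MODIFIED"
package main

import "fmt"

var (
	GitLastTag    = "unknown" // last reachable tag, e.g. "v0.1.0-rc6"
	GitHeadCommit = "unknown" // short SHA of HEAD
	GitModified   = ""        // ".dev", ".dirty", ".dev.dirty", or empty
)

func main() {
	// Prints e.g. "v0.1.0-rc6.dev (a2fda44110)".
	fmt.Printf("%s%s (%s)\n", GitLastTag, GitModified, GitHeadCommit)
}
```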
.github/workflows/e2e.yml (vendored, 19 changes)
@@ -11,25 +11,15 @@ jobs:
name: Kubernetes
strategy:
matrix:
k8s-version: ['v1.16.15', 'v1.17.11', 'v1.18.8', 'v1.19.4', 'v1.20.0']
k8s-version: ['v1.16.15', 'v1.17.11', 'v1.18.8', 'v1.19.4', 'v1.20.7', 'v1.21.2', 'v1.22.0']
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Cache Go modules and Docker images
uses: actions/cache@v1
env:
cache-name: gomod-docker
- uses: actions/setup-go@v2
with:
path: |
~/go/pkg/mod
/var/lib/docker
/home/runner/work/capsule/capsule
key: ${{ matrix.k8s-version }}-build-${{ env.cache-name }}
restore-keys: |
${{ matrix.k8s-version }}-build-
${{ matrix.k8s-version }}-
go-version: '^1.16'
- run: make manifests
- name: Checking if manifests are disaligned
run: test -z "$(git diff 2> /dev/null)"
@@ -39,10 +29,11 @@ jobs:
run: go get github.com/onsi/ginkgo/ginkgo
- uses: actions/setup-go@v2
with:
go-version: '^1.13.8'
go-version: '^1.16'
- uses: engineerd/setup-kind@v0.5.0
with:
skipClusterCreation: true
version: v0.11.1
- uses: azure/setup-helm@v1
with:
version: 3.3.4
Makefile (18 changes)
@@ -47,22 +47,26 @@ manager: generate fmt vet
run: generate manifests
go run ./main.go

# Creates the single file to install Capsule without any external dependency
installer: manifests kustomize
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default > config/install.yaml

# Install CRDs into a cluster
install: manifests kustomize
install: installer
$(KUSTOMIZE) build config/crd | kubectl apply -f -

# Uninstall CRDs from a cluster
uninstall: manifests kustomize
uninstall: installer
$(KUSTOMIZE) build config/crd | kubectl delete -f -

# Deploy controller in the configured Kubernetes cluster in ~/.kube/config
deploy: manifests kustomize
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | kubectl apply -f -
deploy: installer
kubectl apply -f config/install.yaml

# Remove controller in the configured Kubernetes cluster in ~/.kube/config
remove: manifests kustomize
$(KUSTOMIZE) build config/default | kubectl delete -f -
remove: installer
kubectl delete -f config/install.yaml
kubectl delete clusterroles.rbac.authorization.k8s.io capsule-namespace-deleter capsule-namespace-provisioner --ignore-not-found
kubectl delete clusterrolebindings.rbac.authorization.k8s.io capsule-namespace-deleter capsule-namespace-provisioner --ignore-not-found
README.md (23 changes)
@@ -61,17 +61,28 @@ Make sure you have access to a Kubernetes cluster as administrator.
There are two ways to install Capsule:

* Use the Helm Chart available [here](./charts/capsule/README.md)
* Use [`kustomize`](https://github.com/kubernetes-sigs/kustomize)
* Use the [single YAML file installer](./config/install.yaml)

## Install with kustomize
Ensure you have `kubectl` and `kustomize` installed in your `PATH`.
## Install with the single YAML file installer

Ensure you have `kubectl` installed in your `PATH`.

Clone this repository and move to the repo folder:

```
$ git clone https://github.com/clastix/capsule
$ cd capsule
$ make deploy
$ kubectl apply -f https://raw.githubusercontent.com/clastix/capsule/master/config/install.yaml
namespace/capsule-system created
customresourcedefinition.apiextensions.k8s.io/capsuleconfigurations.capsule.clastix.io created
customresourcedefinition.apiextensions.k8s.io/tenants.capsule.clastix.io created
clusterrolebinding.rbac.authorization.k8s.io/capsule-manager-rolebinding created
secret/capsule-ca created
secret/capsule-tls created
service/capsule-controller-manager-metrics-service created
service/capsule-webhook-service created
deployment.apps/capsule-controller-manager created
capsuleconfiguration.capsule.clastix.io/capsule-default created
mutatingwebhookconfiguration.admissionregistration.k8s.io/capsule-mutating-webhook-configuration created
validatingwebhookconfiguration.admissionregistration.k8s.io/capsule-validating-webhook-configuration created
```

It will install the Capsule controller in a dedicated namespace `capsule-system`.
@@ -8,7 +8,6 @@ import (
)

// CapsuleConfigurationSpec defines the Capsule configuration
// nolint:maligned
type CapsuleConfigurationSpec struct {
// Names of the groups for Capsule users.
// +kubebuilder:default={capsule.clastix.io}
@@ -19,15 +18,6 @@ type CapsuleConfigurationSpec struct {
ForceTenantPrefix bool `json:"forceTenantPrefix,omitempty"`
// Disallow creation of namespaces, whose name matches this regexp
ProtectedNamespaceRegexpString string `json:"protectedNamespaceRegex,omitempty"`
// When defining the exact match for allowed Ingress hostnames at Tenant level, a collision is not allowed.
// Toggling this, Capsule will not check if a hostname collision is in place, allowing the creation of
// two or more Tenant resources although sharing the same allowed hostname(s).
//
// The JSON path of the resource is: /spec/ingressHostnames/allowed
AllowTenantIngressHostnamesCollision bool `json:"allowTenantIngressHostnamesCollision,omitempty"`
// Allow the collision of Ingress resource hostnames across all the Tenants.
// +kubebuilder:default=true
AllowIngressHostnameCollision bool `json:"allowIngressHostnameCollision,omitempty"`
}

// +kubebuilder:object:root=true
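For orientation, a sketch of populating the trimmed-down spec in Go. Only the fields visible in the hunks are certain; the group-names field (suggested by the "Names of the groups" comment) is an assumption here:

```go
// Illustrative only: AllowTenantIngressHostnamesCollision is removed, while
// AllowIngressHostnameCollision stays and defaults to true.
func exampleConfiguration() CapsuleConfigurationSpec {
	return CapsuleConfigurationSpec{
		UserGroups:                     []string{"capsule.clastix.io"}, // assumed field name, not shown in the hunk
		ForceTenantPrefix:              false,
		ProtectedNamespaceRegexpString: "^kube-",
		AllowIngressHostnameCollision:  true,
	}
}
```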
@@ -10,63 +10,68 @@ import (
"strings"

"github.com/pkg/errors"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/conversion"

capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

const (
resourceQuotaScopeAnnotation = "capsule.clastix.io/resource-quota-scope"

podAllowedImagePullPolicyAnnotation = "capsule.clastix.io/allowed-image-pull-policy"

podPriorityAllowedAnnotation = "priorityclass.capsule.clastix.io/allowed"
podPriorityAllowedRegexAnnotation = "priorityclass.capsule.clastix.io/allowed-regex"

enableNodePortsAnnotation = "capsule.clastix.io/enable-node-ports"
enableNodePortsAnnotation = "capsule.clastix.io/enable-node-ports"
enableExternalNameAnnotation = "capsule.clastix.io/enable-external-name"

ownerGroupsAnnotation = "owners.capsule.clastix.io/group"
ownerUsersAnnotation = "owners.capsule.clastix.io/user"
ownerServiceAccountAnnotation = "owners.capsule.clastix.io/serviceaccount"

enableNodeListingAnnotation = "capsule.clastix.io/enable-node-listing"
enableNodeUpdateAnnotation = "capsule.clastix.io/enable-node-update"
enableNodeDeletionAnnotation = "capsule.clastix.io/enable-node-deletion"
enableStorageClassListingAnnotation = "capsule.clastix.io/enable-storageclass-listing"
enableStorageClassUpdateAnnotation = "capsule.clastix.io/enable-storageclass-update"
enableStorageClassDeletionAnnotation = "capsule.clastix.io/enable-storageclass-deletion"
enableIngressClassListingAnnotation = "capsule.clastix.io/enable-ingressclass-listing"
enableIngressClassUpdateAnnotation = "capsule.clastix.io/enable-ingressclass-update"
enableIngressClassDeletionAnnotation = "capsule.clastix.io/enable-ingressclass-deletion"
enableNodeListingAnnotation = "capsule.clastix.io/enable-node-listing"
enableNodeUpdateAnnotation = "capsule.clastix.io/enable-node-update"
enableNodeDeletionAnnotation = "capsule.clastix.io/enable-node-deletion"
enableStorageClassListingAnnotation = "capsule.clastix.io/enable-storageclass-listing"
enableStorageClassUpdateAnnotation = "capsule.clastix.io/enable-storageclass-update"
enableStorageClassDeletionAnnotation = "capsule.clastix.io/enable-storageclass-deletion"
enableIngressClassListingAnnotation = "capsule.clastix.io/enable-ingressclass-listing"
enableIngressClassUpdateAnnotation = "capsule.clastix.io/enable-ingressclass-update"
enableIngressClassDeletionAnnotation = "capsule.clastix.io/enable-ingressclass-deletion"
enablePriorityClassListingAnnotation = "capsule.clastix.io/enable-priorityclass-listing"
enablePriorityClassUpdateAnnotation = "capsule.clastix.io/enable-priorityclass-update"
enablePriorityClassDeletionAnnotation = "capsule.clastix.io/enable-priorityclass-deletion"

listOperation = "List"
updateOperation = "Update"
deleteOperation = "Delete"

nodesServiceKind = "Nodes"
storageClassesServiceKind = "StorageClasses"
ingressClassesServiceKind = "IngressClasses"
ingressHostnameCollisionScope = "ingress.capsule.clastix.io/hostname-collision-scope"
)

func (t *Tenant) convertV1Alpha1OwnerToV1Beta1() []capsulev1beta1.OwnerSpec {
func (t *Tenant) convertV1Alpha1OwnerToV1Beta1() capsulev1beta1.OwnerListSpec {
var serviceKindToAnnotationMap = map[capsulev1beta1.ProxyServiceKind][]string{
nodesServiceKind: {enableNodeListingAnnotation, enableNodeUpdateAnnotation, enableNodeDeletionAnnotation},
storageClassesServiceKind: {enableStorageClassListingAnnotation, enableStorageClassUpdateAnnotation, enableStorageClassDeletionAnnotation},
ingressClassesServiceKind: {enableIngressClassListingAnnotation, enableIngressClassUpdateAnnotation, enableIngressClassDeletionAnnotation},
capsulev1beta1.NodesProxy: {enableNodeListingAnnotation, enableNodeUpdateAnnotation, enableNodeDeletionAnnotation},
capsulev1beta1.StorageClassesProxy: {enableStorageClassListingAnnotation, enableStorageClassUpdateAnnotation, enableStorageClassDeletionAnnotation},
capsulev1beta1.IngressClassesProxy: {enableIngressClassListingAnnotation, enableIngressClassUpdateAnnotation, enableIngressClassDeletionAnnotation},
capsulev1beta1.PriorityClassesProxy: {enablePriorityClassListingAnnotation, enablePriorityClassUpdateAnnotation, enablePriorityClassDeletionAnnotation},
}
var annotationToOperationMap = map[string]capsulev1beta1.ProxyOperation{
enableNodeListingAnnotation: listOperation,
enableNodeUpdateAnnotation: updateOperation,
enableNodeDeletionAnnotation: deleteOperation,
enableStorageClassListingAnnotation: listOperation,
enableStorageClassUpdateAnnotation: updateOperation,
enableStorageClassDeletionAnnotation: deleteOperation,
enableIngressClassListingAnnotation: listOperation,
enableIngressClassUpdateAnnotation: updateOperation,
enableIngressClassDeletionAnnotation: deleteOperation,
enableNodeListingAnnotation: capsulev1beta1.ListOperation,
enableNodeUpdateAnnotation: capsulev1beta1.UpdateOperation,
enableNodeDeletionAnnotation: capsulev1beta1.DeleteOperation,
enableStorageClassListingAnnotation: capsulev1beta1.ListOperation,
enableStorageClassUpdateAnnotation: capsulev1beta1.UpdateOperation,
enableStorageClassDeletionAnnotation: capsulev1beta1.DeleteOperation,
enableIngressClassListingAnnotation: capsulev1beta1.ListOperation,
enableIngressClassUpdateAnnotation: capsulev1beta1.UpdateOperation,
enableIngressClassDeletionAnnotation: capsulev1beta1.DeleteOperation,
enablePriorityClassListingAnnotation: capsulev1beta1.ListOperation,
enablePriorityClassUpdateAnnotation: capsulev1beta1.UpdateOperation,
enablePriorityClassDeletionAnnotation: capsulev1beta1.DeleteOperation,
}
var annotationToOwnerKindMap = map[string]capsulev1beta1.OwnerKind{
ownerUsersAnnotation: "User",
ownerGroupsAnnotation: "Group",
ownerServiceAccountAnnotation: "ServiceAccount",
ownerUsersAnnotation: capsulev1beta1.UserOwner,
ownerGroupsAnnotation: capsulev1beta1.GroupOwner,
ownerServiceAccountAnnotation: capsulev1beta1.ServiceAccountOwner,
}
annotations := t.GetAnnotations()
@@ -86,7 +91,7 @@ func (t *Tenant) convertV1Alpha1OwnerToV1Beta1() []capsulev1beta1.OwnerSpec {
}
}

var owners []capsulev1beta1.OwnerSpec
var owners capsulev1beta1.OwnerListSpec

var getProxySettingsForOwner = func(ownerName string) (settings []capsulev1beta1.ProxySettings) {
ownerOperations, ok := operations[ownerName]
@@ -131,21 +136,34 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
dst.ObjectMeta = t.ObjectMeta

// Spec
dst.Spec.NamespaceQuota = t.Spec.NamespaceQuota
if t.Spec.NamespaceQuota != nil {
if dst.Spec.NamespaceOptions == nil {
dst.Spec.NamespaceOptions = &capsulev1beta1.NamespaceOptions{}
}
dst.Spec.NamespaceOptions.Quota = t.Spec.NamespaceQuota
}

dst.Spec.NodeSelector = t.Spec.NodeSelector

dst.Spec.Owners = t.convertV1Alpha1OwnerToV1Beta1()

if t.Spec.NamespacesMetadata != nil {
dst.Spec.NamespacesMetadata = &capsulev1beta1.AdditionalMetadataSpec{
AdditionalLabels: t.Spec.NamespacesMetadata.AdditionalLabels,
AdditionalAnnotations: t.Spec.NamespacesMetadata.AdditionalAnnotations,
if dst.Spec.NamespaceOptions == nil {
dst.Spec.NamespaceOptions = &capsulev1beta1.NamespaceOptions{}
}
dst.Spec.NamespaceOptions.AdditionalMetadata = &capsulev1beta1.AdditionalMetadataSpec{
Labels: t.Spec.NamespacesMetadata.AdditionalLabels,
Annotations: t.Spec.NamespacesMetadata.AdditionalAnnotations,
}
}
if t.Spec.ServicesMetadata != nil {
dst.Spec.ServicesMetadata = &capsulev1beta1.AdditionalMetadataSpec{
AdditionalLabels: t.Spec.ServicesMetadata.AdditionalLabels,
AdditionalAnnotations: t.Spec.ServicesMetadata.AdditionalAnnotations,
if dst.Spec.ServiceOptions == nil {
dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{
AdditionalMetadata: &capsulev1beta1.AdditionalMetadataSpec{
Labels: t.Spec.ServicesMetadata.AdditionalLabels,
Annotations: t.Spec.ServicesMetadata.AdditionalAnnotations,
},
}
}
}
if t.Spec.StorageClasses != nil {
@@ -154,14 +172,22 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
Regex: t.Spec.StorageClasses.Regex,
}
}
if v, ok := t.Annotations[ingressHostnameCollisionScope]; ok {
switch v {
case string(capsulev1beta1.HostnameCollisionScopeCluster), string(capsulev1beta1.HostnameCollisionScopeTenant), string(capsulev1beta1.HostnameCollisionScopeNamespace):
dst.Spec.IngressOptions.HostnameCollisionScope = capsulev1beta1.HostnameCollisionScope(v)
default:
dst.Spec.IngressOptions.HostnameCollisionScope = capsulev1beta1.HostnameCollisionScopeDisabled
}
}
if t.Spec.IngressClasses != nil {
dst.Spec.IngressClasses = &capsulev1beta1.AllowedListSpec{
dst.Spec.IngressOptions.AllowedClasses = &capsulev1beta1.AllowedListSpec{
Exact: t.Spec.IngressClasses.Exact,
Regex: t.Spec.IngressClasses.Regex,
}
}
if t.Spec.IngressHostnames != nil {
dst.Spec.IngressHostnames = &capsulev1beta1.AllowedListSpec{
dst.Spec.IngressOptions.AllowedHostnames = &capsulev1beta1.AllowedListSpec{
Exact: t.Spec.IngressHostnames.Exact,
Regex: t.Spec.IngressHostnames.Regex,
}
@@ -184,6 +210,17 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
}
if len(t.Spec.ResourceQuota) > 0 {
dst.Spec.ResourceQuota = &capsulev1beta1.ResourceQuotaSpec{
Scope: func() capsulev1beta1.ResourceQuotaScope {
if v, ok := t.GetAnnotations()[resourceQuotaScopeAnnotation]; ok {
switch v {
case string(capsulev1beta1.ResourceQuotaScopeNamespace):
return capsulev1beta1.ResourceQuotaScopeNamespace
case string(capsulev1beta1.ResourceQuotaScopeTenant):
return capsulev1beta1.ResourceQuotaScopeTenant
}
}
return capsulev1beta1.ResourceQuotaScopeTenant
}(),
Items: t.Spec.ResourceQuota,
}
}
@@ -196,13 +233,15 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
}
}
if t.Spec.ExternalServiceIPs != nil {
var allowedIPs []capsulev1beta1.AllowedIP
for _, IP := range t.Spec.ExternalServiceIPs.Allowed {
allowedIPs = append(allowedIPs, capsulev1beta1.AllowedIP(IP))
if dst.Spec.ServiceOptions == nil {
dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{}
}
dst.Spec.ServiceOptions.ExternalServiceIPs = &capsulev1beta1.ExternalServiceIPsSpec{
Allowed: make([]capsulev1beta1.AllowedIP, len(t.Spec.ExternalServiceIPs.Allowed)),
}

dst.Spec.ExternalServiceIPs = &capsulev1beta1.ExternalServiceIPsSpec{
Allowed: allowedIPs,
for i, IP := range t.Spec.ExternalServiceIPs.Allowed {
dst.Spec.ServiceOptions.ExternalServiceIPs.Allowed[i] = capsulev1beta1.AllowedIP(IP)
}
}
@@ -234,7 +273,28 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
if err != nil {
return errors.Wrap(err, fmt.Sprintf("unable to parse %s annotation on tenant %s", enableNodePortsAnnotation, t.GetName()))
}
dst.Spec.EnableNodePorts = val
if dst.Spec.ServiceOptions == nil {
dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{}
}
if dst.Spec.ServiceOptions.AllowedServices == nil {
dst.Spec.ServiceOptions.AllowedServices = &capsulev1beta1.AllowedServices{}
}
dst.Spec.ServiceOptions.AllowedServices.NodePort = pointer.BoolPtr(val)
}

enableExternalName, ok := annotations[enableExternalNameAnnotation]
if ok {
val, err := strconv.ParseBool(enableExternalName)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("unable to parse %s annotation on tenant %s", enableExternalNameAnnotation, t.GetName()))
}
if dst.Spec.ServiceOptions == nil {
dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{}
}
if dst.Spec.ServiceOptions.AllowedServices == nil {
dst.Spec.ServiceOptions.AllowedServices = &capsulev1beta1.AllowedServices{}
}
dst.Spec.ServiceOptions.AllowedServices.ExternalName = pointer.BoolPtr(val)
}

// Status
@@ -248,6 +308,7 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
delete(dst.ObjectMeta.Annotations, podPriorityAllowedAnnotation)
delete(dst.ObjectMeta.Annotations, podPriorityAllowedRegexAnnotation)
delete(dst.ObjectMeta.Annotations, enableNodePortsAnnotation)
delete(dst.ObjectMeta.Annotations, enableExternalNameAnnotation)
delete(dst.ObjectMeta.Annotations, ownerGroupsAnnotation)
delete(dst.ObjectMeta.Annotations, ownerUsersAnnotation)
delete(dst.ObjectMeta.Annotations, ownerServiceAccountAnnotation)
@@ -260,6 +321,11 @@ func (t *Tenant) ConvertTo(dstRaw conversion.Hub) error {
delete(dst.ObjectMeta.Annotations, enableIngressClassListingAnnotation)
delete(dst.ObjectMeta.Annotations, enableIngressClassUpdateAnnotation)
delete(dst.ObjectMeta.Annotations, enableIngressClassDeletionAnnotation)
delete(dst.ObjectMeta.Annotations, enablePriorityClassListingAnnotation)
delete(dst.ObjectMeta.Annotations, enablePriorityClassUpdateAnnotation)
delete(dst.ObjectMeta.Annotations, enablePriorityClassDeletionAnnotation)
delete(dst.ObjectMeta.Annotations, resourceQuotaScopeAnnotation)
delete(dst.ObjectMeta.Annotations, ingressHostnameCollisionScope)

return nil
}
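The lazy nil-guard for `dst.Spec.ServiceOptions` (and `AllowedServices`) now appears in three separate branches of `ConvertTo`. A possible follow-up refactor, sketched here only and not part of this changeset, would centralize it:

```go
// Hypothetical helper, not in this diff: mirrors the repeated nil-guards so
// each annotation handler can assign fields directly.
func ensureAllowedServices(dst *capsulev1beta1.Tenant) *capsulev1beta1.AllowedServices {
	if dst.Spec.ServiceOptions == nil {
		dst.Spec.ServiceOptions = &capsulev1beta1.ServiceOptions{}
	}
	if dst.Spec.ServiceOptions.AllowedServices == nil {
		dst.Spec.ServiceOptions.AllowedServices = &capsulev1beta1.AllowedServices{}
	}
	return dst.Spec.ServiceOptions.AllowedServices
}

// Usage inside the annotation handlers:
//   ensureAllowedServices(dst).NodePort = pointer.BoolPtr(val)
//   ensureAllowedServices(dst).ExternalName = pointer.BoolPtr(val)
```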
@@ -291,46 +357,57 @@ func (t *Tenant) convertV1Beta1OwnerToV1Alpha1(src *capsulev1beta1.Tenant) {
}
} else {
switch owner.Kind {
case "User":
case capsulev1beta1.UserOwner:
ownersAnnotations[ownerUsersAnnotation] = append(ownersAnnotations[ownerUsersAnnotation], owner.Name)
case "Group":
case capsulev1beta1.GroupOwner:
ownersAnnotations[ownerGroupsAnnotation] = append(ownersAnnotations[ownerGroupsAnnotation], owner.Name)
case "ServiceAccount":
case capsulev1beta1.ServiceAccountOwner:
ownersAnnotations[ownerServiceAccountAnnotation] = append(ownersAnnotations[ownerServiceAccountAnnotation], owner.Name)
}
}
for _, setting := range owner.ProxyOperations {
switch setting.Kind {
case nodesServiceKind:
case capsulev1beta1.NodesProxy:
for _, operation := range setting.Operations {
switch operation {
case listOperation:
case capsulev1beta1.ListOperation:
proxyAnnotations[enableNodeListingAnnotation] = append(proxyAnnotations[enableNodeListingAnnotation], owner.Name)
case updateOperation:
case capsulev1beta1.UpdateOperation:
proxyAnnotations[enableNodeUpdateAnnotation] = append(proxyAnnotations[enableNodeUpdateAnnotation], owner.Name)
case deleteOperation:
case capsulev1beta1.DeleteOperation:
proxyAnnotations[enableNodeDeletionAnnotation] = append(proxyAnnotations[enableNodeDeletionAnnotation], owner.Name)
}
}
case storageClassesServiceKind:
case capsulev1beta1.PriorityClassesProxy:
for _, operation := range setting.Operations {
switch operation {
case listOperation:
case capsulev1beta1.ListOperation:
proxyAnnotations[enablePriorityClassListingAnnotation] = append(proxyAnnotations[enablePriorityClassListingAnnotation], owner.Name)
case capsulev1beta1.UpdateOperation:
proxyAnnotations[enablePriorityClassUpdateAnnotation] = append(proxyAnnotations[enablePriorityClassUpdateAnnotation], owner.Name)
case capsulev1beta1.DeleteOperation:
proxyAnnotations[enablePriorityClassDeletionAnnotation] = append(proxyAnnotations[enablePriorityClassDeletionAnnotation], owner.Name)
}
}
case capsulev1beta1.StorageClassesProxy:
for _, operation := range setting.Operations {
switch operation {
case capsulev1beta1.ListOperation:
proxyAnnotations[enableStorageClassListingAnnotation] = append(proxyAnnotations[enableStorageClassListingAnnotation], owner.Name)
case updateOperation:
case capsulev1beta1.UpdateOperation:
proxyAnnotations[enableStorageClassUpdateAnnotation] = append(proxyAnnotations[enableStorageClassUpdateAnnotation], owner.Name)
case deleteOperation:
case capsulev1beta1.DeleteOperation:
proxyAnnotations[enableStorageClassDeletionAnnotation] = append(proxyAnnotations[enableStorageClassDeletionAnnotation], owner.Name)
}
}
case ingressClassesServiceKind:
case capsulev1beta1.IngressClassesProxy:
for _, operation := range setting.Operations {
switch operation {
case listOperation:
case capsulev1beta1.ListOperation:
proxyAnnotations[enableIngressClassListingAnnotation] = append(proxyAnnotations[enableIngressClassListingAnnotation], owner.Name)
case updateOperation:
case capsulev1beta1.UpdateOperation:
proxyAnnotations[enableIngressClassUpdateAnnotation] = append(proxyAnnotations[enableIngressClassUpdateAnnotation], owner.Name)
case deleteOperation:
case capsulev1beta1.DeleteOperation:
proxyAnnotations[enableIngressClassDeletionAnnotation] = append(proxyAnnotations[enableIngressClassDeletionAnnotation], owner.Name)
}
}
@@ -357,7 +434,10 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
t.ObjectMeta = src.ObjectMeta

// Spec
t.Spec.NamespaceQuota = src.Spec.NamespaceQuota
if src.Spec.NamespaceOptions != nil && src.Spec.NamespaceOptions.Quota != nil {
t.Spec.NamespaceQuota = src.Spec.NamespaceOptions.Quota
}

t.Spec.NodeSelector = src.Spec.NodeSelector

if t.Annotations == nil {
@@ -366,16 +446,16 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {

t.convertV1Beta1OwnerToV1Alpha1(src)

if src.Spec.NamespacesMetadata != nil {
if src.Spec.NamespaceOptions != nil && src.Spec.NamespaceOptions.AdditionalMetadata != nil {
t.Spec.NamespacesMetadata = &AdditionalMetadataSpec{
AdditionalLabels: src.Spec.NamespacesMetadata.AdditionalLabels,
AdditionalAnnotations: src.Spec.NamespacesMetadata.AdditionalAnnotations,
AdditionalLabels: src.Spec.NamespaceOptions.AdditionalMetadata.Labels,
AdditionalAnnotations: src.Spec.NamespaceOptions.AdditionalMetadata.Annotations,
}
}
if src.Spec.ServicesMetadata != nil {
if src.Spec.ServiceOptions != nil && src.Spec.ServiceOptions.AdditionalMetadata != nil {
t.Spec.ServicesMetadata = &AdditionalMetadataSpec{
AdditionalLabels: src.Spec.ServicesMetadata.AdditionalLabels,
AdditionalAnnotations: src.Spec.ServicesMetadata.AdditionalAnnotations,
AdditionalLabels: src.Spec.ServiceOptions.AdditionalMetadata.Labels,
AdditionalAnnotations: src.Spec.ServiceOptions.AdditionalMetadata.Annotations,
}
}
if src.Spec.StorageClasses != nil {
@@ -384,16 +464,17 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
Regex: src.Spec.StorageClasses.Regex,
}
}
if src.Spec.IngressClasses != nil {
t.Annotations[ingressHostnameCollisionScope] = string(src.Spec.IngressOptions.HostnameCollisionScope)
if src.Spec.IngressOptions.AllowedClasses != nil {
t.Spec.IngressClasses = &AllowedListSpec{
Exact: src.Spec.IngressClasses.Exact,
Regex: src.Spec.IngressClasses.Regex,
Exact: src.Spec.IngressOptions.AllowedClasses.Exact,
Regex: src.Spec.IngressOptions.AllowedClasses.Regex,
}
}
if src.Spec.IngressHostnames != nil {
if src.Spec.IngressOptions.AllowedHostnames != nil {
t.Spec.IngressHostnames = &AllowedListSpec{
Exact: src.Spec.IngressHostnames.Exact,
Regex: src.Spec.IngressHostnames.Regex,
Exact: src.Spec.IngressOptions.AllowedHostnames.Exact,
Regex: src.Spec.IngressOptions.AllowedHostnames.Regex,
}
}
if src.Spec.ContainerRegistries != nil {
@@ -409,6 +490,7 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
t.Spec.LimitRanges = src.Spec.LimitRanges.Items
}
if src.Spec.ResourceQuota != nil {
t.Annotations[resourceQuotaScopeAnnotation] = string(src.Spec.ResourceQuota.Scope)
t.Spec.ResourceQuota = src.Spec.ResourceQuota.Items
}
if len(src.Spec.AdditionalRoleBindings) > 0 {
@@ -419,14 +501,13 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
})
}
}
if src.Spec.ExternalServiceIPs != nil {
var allowedIPs []AllowedIP
for _, IP := range src.Spec.ExternalServiceIPs.Allowed {
allowedIPs = append(allowedIPs, AllowedIP(IP))
if src.Spec.ServiceOptions != nil && src.Spec.ServiceOptions.ExternalServiceIPs != nil {
t.Spec.ExternalServiceIPs = &ExternalServiceIPsSpec{
Allowed: make([]AllowedIP, len(src.Spec.ServiceOptions.ExternalServiceIPs.Allowed)),
}

t.Spec.ExternalServiceIPs = &ExternalServiceIPsSpec{
Allowed: allowedIPs,
for i, IP := range src.Spec.ServiceOptions.ExternalServiceIPs.Allowed {
t.Spec.ExternalServiceIPs.Allowed[i] = AllowedIP(IP)
}
}
if len(src.Spec.ImagePullPolicies) != 0 {
@@ -446,7 +527,10 @@ func (t *Tenant) ConvertFrom(srcRaw conversion.Hub) error {
}
}

t.Annotations[enableNodePortsAnnotation] = strconv.FormatBool(src.Spec.EnableNodePorts)
if src.Spec.ServiceOptions != nil && src.Spec.ServiceOptions.AllowedServices != nil {
t.Annotations[enableNodePortsAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.NodePort)
t.Annotations[enableExternalNameAnnotation] = strconv.FormatBool(*src.Spec.ServiceOptions.AllowedServices.ExternalName)
}

// Status
t.Status = TenantStatus{
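One detail worth double-checking in the `ConvertFrom` hunk above: `AllowedServices.NodePort` and `AllowedServices.ExternalName` are individually optional `*bool` fields, so dereferencing both whenever `AllowedServices` is non-nil could panic if only one of them is set (API-server defaulting normally fills them in, but objects built in code may skip it). A nil-safe variant, sketch only:

```go
// Sketch: dereference each optional flag independently rather than assuming
// both pointers are populated whenever AllowedServices is present.
if svc := src.Spec.ServiceOptions; svc != nil && svc.AllowedServices != nil {
	if np := svc.AllowedServices.NodePort; np != nil {
		t.Annotations[enableNodePortsAnnotation] = strconv.FormatBool(*np)
	}
	if en := svc.AllowedServices.ExternalName; en != nil {
		t.Annotations[enableExternalNameAnnotation] = strconv.FormatBool(*en)
	}
}
```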
@@ -13,6 +13,7 @@ import (
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"

capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)
@@ -35,13 +36,27 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
Regex: "^foo*",
}
var v1beta1AdditionalMetadataSpec = &capsulev1beta1.AdditionalMetadataSpec{
AdditionalLabels: map[string]string{
Labels: map[string]string{
"foo": "bar",
},
AdditionalAnnotations: map[string]string{
Annotations: map[string]string{
"foo": "bar",
},
}
var v1beta1NamespaceOptions = &capsulev1beta1.NamespaceOptions{
Quota: &namespaceQuota,
AdditionalMetadata: v1beta1AdditionalMetadataSpec,
}
var v1beta1ServiceOptions = &capsulev1beta1.ServiceOptions{
AdditionalMetadata: v1beta1AdditionalMetadataSpec,
AllowedServices: &capsulev1beta1.AllowedServices{
NodePort: pointer.BoolPtr(false),
ExternalName: pointer.BoolPtr(false),
},
ExternalServiceIPs: &capsulev1beta1.ExternalServiceIPsSpec{
Allowed: []capsulev1beta1.AllowedIP{"192.168.0.1"},
},
}
var v1beta1AllowedListSpec = &capsulev1beta1.AllowedListSpec{
Exact: []string{"foo", "bar"},
Regex: "^foo*",
@@ -114,7 +129,7 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
},
},
Spec: capsulev1beta1.TenantSpec{
Owners: []capsulev1beta1.OwnerSpec{
Owners: capsulev1beta1.OwnerListSpec{
{
Kind: "User",
Name: "alice",
@@ -163,6 +178,10 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
Kind: "StorageClasses",
Operations: []capsulev1beta1.ProxyOperation{"List"},
},
{
Kind: "PriorityClasses",
Operations: []capsulev1beta1.ProxyOperation{"List"},
},
},
},
{
@@ -210,12 +229,14 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
},
},
},
NamespaceQuota: &namespaceQuota,
NamespacesMetadata: v1beta1AdditionalMetadataSpec,
ServicesMetadata: v1beta1AdditionalMetadataSpec,
StorageClasses: v1beta1AllowedListSpec,
IngressClasses: v1beta1AllowedListSpec,
IngressHostnames: v1beta1AllowedListSpec,
NamespaceOptions: v1beta1NamespaceOptions,
ServiceOptions: v1beta1ServiceOptions,
StorageClasses: v1beta1AllowedListSpec,
IngressOptions: capsulev1beta1.IngressOptions{
HostnameCollisionScope: capsulev1beta1.HostnameCollisionScopeDisabled,
AllowedClasses: v1beta1AllowedListSpec,
AllowedHostnames: v1beta1AllowedListSpec,
},
ContainerRegistries: v1beta1AllowedListSpec,
NodeSelector: nodeSelector,
NetworkPolicies: &capsulev1beta1.NetworkPolicySpec{
@@ -225,6 +246,7 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
Items: limitRanges,
},
ResourceQuota: &capsulev1beta1.ResourceQuotaSpec{
Scope: capsulev1beta1.ResourceQuotaScopeNamespace,
Items: resourceQuotas,
},
AdditionalRoleBindings: []capsulev1beta1.AdditionalRoleBindingsSpec{
@@ -239,15 +261,11 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
},
},
},
ExternalServiceIPs: &capsulev1beta1.ExternalServiceIPsSpec{
Allowed: []capsulev1beta1.AllowedIP{"192.168.0.1"},
},
ImagePullPolicies: []capsulev1beta1.ImagePullPolicySpec{"Always", "IfNotPresent"},
PriorityClasses: &capsulev1beta1.AllowedListSpec{
Exact: []string{"default"},
Regex: "^tier-.*$",
},
EnableNodePorts: false,
},
Status: capsulev1beta1.TenantStatus{
Size: 1,
@@ -265,6 +283,7 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
Annotations: map[string]string{
"foo": "bar",
podAllowedImagePullPolicyAnnotation: "Always,IfNotPresent",
enableExternalNameAnnotation: "false",
enableNodePortsAnnotation: "false",
podPriorityAllowedAnnotation: "default",
podPriorityAllowedRegexAnnotation: "^tier-.*$",
@@ -279,6 +298,9 @@ func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
enableIngressClassListingAnnotation: "alice,owner-foo,owner-bar",
enableIngressClassUpdateAnnotation: "alice,bob",
enableIngressClassDeletionAnnotation: "alice,jack",
enablePriorityClassListingAnnotation: "jack",
resourceQuotaScopeAnnotation: "Namespace",
ingressHostnameCollisionScope: "Disabled",
},
},
Spec: TenantSpec{
@@ -4,6 +4,6 @@
package v1beta1

type AdditionalMetadataSpec struct {
AdditionalLabels map[string]string `json:"additionalLabels,omitempty"`
AdditionalAnnotations map[string]string `json:"additionalAnnotations,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Annotations map[string]string `json:"annotations,omitempty"`
}
api/v1beta1/hostname_collision_scope.go (new file, 14 changes)
@@ -0,0 +1,14 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1beta1

const (
HostnameCollisionScopeCluster HostnameCollisionScope = "Cluster"
HostnameCollisionScopeTenant HostnameCollisionScope = "Tenant"
HostnameCollisionScopeNamespace HostnameCollisionScope = "Namespace"
HostnameCollisionScopeDisabled HostnameCollisionScope = "Disabled"
)

// +kubebuilder:validation:Enum=Cluster;Tenant;Namespace;Disabled
type HostnameCollisionScope string
api/v1beta1/ingress_options.go (new file, 24 changes)
@@ -0,0 +1,24 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1beta1

type IngressOptions struct {
// Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
AllowedClasses *AllowedListSpec `json:"allowedClasses,omitempty"`
// Defines the scope of hostname collision check performed when Tenant Owners create Ingress with allowed hostnames.
//
//
// - Cluster: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces managed by Capsule.
//
// - Tenant: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces of the Tenant.
//
// - Namespace: disallow the creation of an Ingress if the pair hostname and path is already used in the Ingress Namespace.
//
//
// Optional.
// +kubebuilder:default=Disabled
HostnameCollisionScope HostnameCollisionScope `json:"hostnameCollisionScope,omitempty"`
// Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
AllowedHostnames *AllowedListSpec `json:"allowedHostnames,omitempty"`
}
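A short sketch tying the three `IngressOptions` knobs together; the concrete class names and hostnames are illustrative:

```go
// Illustrative only: an IngressOptions value using the types defined above.
func exampleIngressOptions() IngressOptions {
	return IngressOptions{
		AllowedClasses:         &AllowedListSpec{Exact: []string{"nginx"}, Regex: "^ingress-.*$"},
		HostnameCollisionScope: HostnameCollisionScopeTenant, // Cluster, Tenant, Namespace, or Disabled
		AllowedHostnames:       &AllowedListSpec{Exact: []string{"capsule.clastix.io"}},
	}
}
```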
api/v1beta1/namespace_options.go (new file, 9 changes)
@@ -0,0 +1,9 @@
package v1beta1

type NamespaceOptions struct {
//+kubebuilder:validation:Minimum=1
// Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
Quota *int32 `json:"quota,omitempty"`
// Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
AdditionalMetadata *AdditionalMetadataSpec `json:"additionalMetadata,omitempty"`
}
@@ -3,10 +3,12 @@

package v1beta1

// OwnerSpec defines tenant owner name and kind
type OwnerSpec struct {
Kind OwnerKind `json:"kind"`
Name string `json:"name"`
// Kind of tenant owner. Possible values are "User", "Group", and "ServiceAccount"
Kind OwnerKind `json:"kind"`
// Name of tenant owner.
Name string `json:"name"`
// Proxy settings for tenant owner.
ProxyOperations []ProxySettings `json:"proxySettings,omitempty"`
}

@@ -29,9 +31,24 @@ func (p ProxyOperation) String() string {
return string(p)
}

// +kubebuilder:validation:Enum=Nodes;StorageClasses;IngressClasses
// +kubebuilder:validation:Enum=Nodes;StorageClasses;IngressClasses;PriorityClasses
type ProxyServiceKind string

func (p ProxyServiceKind) String() string {
return string(p)
}

const (
NodesProxy ProxyServiceKind = "Nodes"
StorageClassesProxy ProxyServiceKind = "StorageClasses"
IngressClassesProxy ProxyServiceKind = "IngressClasses"
PriorityClassesProxy ProxyServiceKind = "PriorityClasses"

ListOperation ProxyOperation = "List"
UpdateOperation ProxyOperation = "Update"
DeleteOperation ProxyOperation = "Delete"

UserOwner OwnerKind = "User"
GroupOwner OwnerKind = "Group"
ServiceAccountOwner OwnerKind = "ServiceAccount"
)
api/v1beta1/owner_list.go (new file, 34 changes)
@@ -0,0 +1,34 @@
package v1beta1

import (
"sort"
)

type OwnerListSpec []OwnerSpec

func (o OwnerListSpec) FindOwner(name string, kind OwnerKind) (owner OwnerSpec) {
sort.Sort(ByKindAndName(o))
i := sort.Search(len(o), func(i int) bool {
return o[i].Kind >= kind && o[i].Name >= name
})

if i < len(o) && o[i].Kind == kind && o[i].Name == name {
return o[i]
}
return
}

type ByKindAndName OwnerListSpec

func (b ByKindAndName) Len() int {
return len(b)
}
func (b ByKindAndName) Less(i, j int) bool {
if b[i].Kind.String() != b[j].Kind.String() {
return b[i].Kind.String() < b[j].Kind.String()
}
return b[i].Name < b[j].Name
}
func (b ByKindAndName) Swap(i, j int) {
b[i], b[j] = b[j], b[i]
}
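Two review notes on `FindOwner`. First, `sort.Sort` reorders the receiver slice in place, so callers observe the list resorted after a lookup. Second, the predicate handed to `sort.Search` (`Kind >= kind && Name >= name`) does not appear monotonic over the (Kind, Name) ordering in every case, so the binary search can miss an existing owner: looking up (`ServiceAccount`, "b") in `{(ServiceAccount, "b"), (User, "a")}` evaluates the predicate as true, then false. A side-effect-free linear scan would avoid both issues; a sketch only, not part of this changeset:

```go
// Sketch only: O(n) lookup without sorting the receiver or relying on a
// monotonic search predicate.
func (o OwnerListSpec) findOwnerLinear(name string, kind OwnerKind) OwnerSpec {
	for _, owner := range o {
		if owner.Kind == kind && owner.Name == name {
			return owner
		}
	}
	return OwnerSpec{}
}
```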
api/v1beta1/owner_list_test.go (new file, 83 changes)
@@ -0,0 +1,83 @@
package v1beta1

import (
"testing"

"github.com/stretchr/testify/assert"
)

func TestOwnerListSpec_FindOwner(t *testing.T) {
var bla = OwnerSpec{
Kind: UserOwner,
Name: "bla",
ProxyOperations: []ProxySettings{
{
Kind: IngressClassesProxy,
Operations: []ProxyOperation{"Delete"},
},
},
}
var bar = OwnerSpec{
Kind: GroupOwner,
Name: "bar",
ProxyOperations: []ProxySettings{
{
Kind: StorageClassesProxy,
Operations: []ProxyOperation{"Delete"},
},
},
}
var baz = OwnerSpec{
Kind: UserOwner,
Name: "baz",
ProxyOperations: []ProxySettings{
{
Kind: StorageClassesProxy,
Operations: []ProxyOperation{"Update"},
},
},
}
var fim = OwnerSpec{
Kind: ServiceAccountOwner,
Name: "fim",
ProxyOperations: []ProxySettings{
{
Kind: NodesProxy,
Operations: []ProxyOperation{"List"},
},
},
}
var bom = OwnerSpec{
Kind: GroupOwner,
Name: "bom",
ProxyOperations: []ProxySettings{
{
Kind: StorageClassesProxy,
Operations: []ProxyOperation{"Delete"},
},
{
Kind: NodesProxy,
Operations: []ProxyOperation{"Delete"},
},
},
}
var qip = OwnerSpec{
Kind: ServiceAccountOwner,
Name: "qip",
ProxyOperations: []ProxySettings{
{
Kind: StorageClassesProxy,
Operations: []ProxyOperation{"List", "Delete"},
},
},
}
var owners = OwnerListSpec{bom, qip, bla, bar, baz, fim}

assert.Equal(t, owners.FindOwner("bom", GroupOwner), bom)
assert.Equal(t, owners.FindOwner("qip", ServiceAccountOwner), qip)
assert.Equal(t, owners.FindOwner("bla", UserOwner), bla)
assert.Equal(t, owners.FindOwner("bar", GroupOwner), bar)
assert.Equal(t, owners.FindOwner("baz", UserOwner), baz)
assert.Equal(t, owners.FindOwner("fim", ServiceAccountOwner), fim)
assert.Equal(t, owners.FindOwner("notfound", ServiceAccountOwner), OwnerSpec{})
}
@@ -5,6 +5,17 @@ package v1beta1

import corev1 "k8s.io/api/core/v1"

// +kubebuilder:validation:Enum=Tenant;Namespace
type ResourceQuotaScope string

const (
ResourceQuotaScopeTenant ResourceQuotaScope = "Tenant"
ResourceQuotaScopeNamespace ResourceQuotaScope = "Namespace"
)

type ResourceQuotaSpec struct {
// +kubebuilder:default=Tenant
// Defines whether the resource budget is computed across all Namespaces in the Tenant (Tenant) or individually per Namespace (Namespace). Default is Tenant.
Scope ResourceQuotaScope `json:"scope,omitempty"`
Items []corev1.ResourceQuotaSpec `json:"items,omitempty"`
}
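A small sketch of the scoped quota in Go, assuming the `corev1` and `resource` imports already used elsewhere in this changeset; the quantity is illustrative:

```go
// Illustrative only: a quota aggregated across all Namespaces of the Tenant.
func exampleResourceQuota() *ResourceQuotaSpec {
	return &ResourceQuotaSpec{
		Scope: ResourceQuotaScopeTenant, // or ResourceQuotaScopeNamespace
		Items: []corev1.ResourceQuotaSpec{
			{Hard: corev1.ResourceList{corev1.ResourcePods: resource.MustParse("50")}},
		},
	}
}
```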
api/v1beta1/service_allowed_types.go (new file, 13 changes)
@@ -0,0 +1,13 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1beta1

type AllowedServices struct {
//+kubebuilder:default=true
// Specifies if NodePort service type resources are allowed for the Tenant. Default is true. Optional.
NodePort *bool `json:"nodePort,omitempty"`
//+kubebuilder:default=true
// Specifies if ExternalName service type resources are allowed for the Tenant. Default is true. Optional.
ExternalName *bool `json:"externalName,omitempty"`
}
api/v1beta1/service_options.go (new file, 13 changes)
@@ -0,0 +1,13 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package v1beta1

type ServiceOptions struct {
// Specifies additional labels and annotations the Capsule operator places on any Service resource in the Tenant. Optional.
AdditionalMetadata *AdditionalMetadataSpec `json:"additionalMetadata,omitempty"`
// Blocks or denies certain types of Services. Optional.
AllowedServices *AllowedServices `json:"allowedServices,omitempty"`
// Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means no IPs are allowed. Optional.
ExternalServiceIPs *ExternalServiceIPsSpec `json:"externalIPs,omitempty"`
}
@@ -18,10 +18,10 @@ func (t *Tenant) IsCordoned() bool {

func (t *Tenant) IsFull() bool {
// we don't have limits on assigned Namespaces
if t.Spec.NamespaceQuota == nil {
if t.Spec.NamespaceOptions == nil || t.Spec.NamespaceOptions.Quota == nil {
return false
}
return len(t.Status.Namespaces) >= int(*t.Spec.NamespaceQuota)
return len(t.Status.Namespaces) >= int(*t.Spec.NamespaceOptions.Quota)
}

func (t *Tenant) AssignNamespaces(namespaces []corev1.Namespace) {
@@ -36,3 +36,7 @@ func (t *Tenant) AssignNamespaces(namespaces []corev1.Namespace) {
t.Status.Namespaces = l
t.Status.Size = uint(len(l))
}

func (t *Tenant) GetOwnerProxySettings(name string, kind OwnerKind) []ProxySettings {
return t.Spec.Owners.FindOwner(name, kind).ProxyOperations
}
@@ -3,18 +3,21 @@

package v1beta1

// +kubebuilder:validation:Enum=cordoned;active
// +kubebuilder:validation:Enum=Cordoned;Active
type tenantState string

const (
TenantStateActive tenantState = "active"
TenantStateCordoned tenantState = "cordoned"
TenantStateActive tenantState = "Active"
TenantStateCordoned tenantState = "Cordoned"
)

// TenantStatus defines the observed state of Tenant
// Returns the observed state of the Tenant
type TenantStatus struct {
//+kubebuilder:default=active
State tenantState `json:"state"`
Size uint `json:"size"`
Namespaces []string `json:"namespaces,omitempty"`
//+kubebuilder:default=Active
// The operational state of the Tenant. Possible values are "Active", "Cordoned".
State tenantState `json:"state"`
// How many namespaces are assigned to the Tenant.
Size uint `json:"size"`
// List of namespaces assigned to the Tenant.
Namespaces []string `json:"namespaces,omitempty"`
}
@@ -9,27 +9,32 @@ import (
|
||||
|
||||
// TenantSpec defines the desired state of Tenant
|
||||
type TenantSpec struct {
|
||||
Owners []OwnerSpec `json:"owners"`
|
||||
|
||||
//+kubebuilder:validation:Minimum=1
|
||||
NamespaceQuota *int32 `json:"namespaceQuota,omitempty"`
|
||||
    NamespacesMetadata *AdditionalMetadataSpec `json:"namespacesMetadata,omitempty"`
    ServicesMetadata *AdditionalMetadataSpec `json:"servicesMetadata,omitempty"`
    StorageClasses *AllowedListSpec `json:"storageClasses,omitempty"`
    IngressClasses *AllowedListSpec `json:"ingressClasses,omitempty"`
    IngressHostnames *AllowedListSpec `json:"ingressHostnames,omitempty"`
    ContainerRegistries *AllowedListSpec `json:"containerRegistries,omitempty"`
    NodeSelector map[string]string `json:"nodeSelector,omitempty"`
    NetworkPolicies *NetworkPolicySpec `json:"networkPolicies,omitempty"`
    LimitRanges *LimitRangesSpec `json:"limitRanges,omitempty"`
    ResourceQuota *ResourceQuotaSpec `json:"resourceQuotas,omitempty"`
    // Specifies the owners of the Tenant. Mandatory.
    Owners OwnerListSpec `json:"owners"`
    // Specifies options for the Namespaces, such as additional metadata or maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
    NamespaceOptions *NamespaceOptions `json:"namespaceOptions,omitempty"`
    // Specifies options for the Service, such as additional metadata or block of certain type of Services. Optional.
    ServiceOptions *ServiceOptions `json:"serviceOptions,omitempty"`
    // Specifies the allowed StorageClasses assigned to the Tenant. Capsule assures that all PersistentVolumeClaim resources created in the Tenant can use only one of the allowed StorageClasses. Optional.
    StorageClasses *AllowedListSpec `json:"storageClasses,omitempty"`
    // Specifies options for the Ingress resources, such as allowed hostnames and IngressClass. Optional.
    IngressOptions IngressOptions `json:"ingressOptions,omitempty"`
    // Specifies the trusted Image Registries assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed trusted registries. Optional.
    ContainerRegistries *AllowedListSpec `json:"containerRegistries,omitempty"`
    // Specifies the label to control the placement of pods on a given pool of worker nodes. All namespaces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
    NodeSelector map[string]string `json:"nodeSelector,omitempty"`
    // Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
    NetworkPolicies *NetworkPolicySpec `json:"networkPolicies,omitempty"`
    // Specifies the LimitRanges assigned to the Tenant. The assigned LimitRange resources are inherited by any namespace created in the Tenant. Optional.
    LimitRanges *LimitRangesSpec `json:"limitRanges,omitempty"`
    // Specifies a list of ResourceQuota resources assigned to the Tenant. The assigned values are inherited by any namespace created in the Tenant. The Capsule operator aggregates ResourceQuota at Tenant level, so that the hard quota is never crossed for the given Tenant. This permits the Tenant owner to consume resources in the Tenant regardless of the namespace. Optional.
    ResourceQuota *ResourceQuotaSpec `json:"resourceQuotas,omitempty"`
    // Specifies additional RoleBindings assigned to the Tenant. Capsule will ensure that all namespaces in the Tenant always contain the RoleBinding for the given ClusterRole. Optional.
    AdditionalRoleBindings []AdditionalRoleBindingsSpec `json:"additionalRoleBindings,omitempty"`
    ExternalServiceIPs *ExternalServiceIPsSpec `json:"externalServiceIPs,omitempty"`
    ImagePullPolicies []ImagePullPolicySpec `json:"imagePullPolicies,omitempty"`
    PriorityClasses *AllowedListSpec `json:"priorityClasses,omitempty"`

    //+kubebuilder:default=true
    EnableNodePorts bool `json:"enableNodePorts,omitempty"`
    // Specify the allowed values for the imagePullPolicies option in Pod resources. Capsule assures that all Pod resources created in the Tenant can use only one of the allowed policies. Optional.
    ImagePullPolicies []ImagePullPolicySpec `json:"imagePullPolicies,omitempty"`
    // Specifies the allowed PriorityClasses assigned to the Tenant. Capsule assures that all Pod resources created in the Tenant can use only one of the allowed PriorityClasses. Optional.
    PriorityClasses *AllowedListSpec `json:"priorityClasses,omitempty"`
}

//+kubebuilder:object:root=true
@@ -37,7 +42,7 @@ type TenantSpec struct {
//+kubebuilder:storageversion
// +kubebuilder:resource:scope=Cluster,shortName=tnt
// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.state",description="The actual state of the Tenant"
// +kubebuilder:printcolumn:name="Namespace quota",type="integer",JSONPath=".spec.namespaceQuota",description="The max amount of Namespaces that can be created"
// +kubebuilder:printcolumn:name="Namespace quota",type="integer",JSONPath=".spec.namespaceOptions.quota",description="The max amount of Namespaces that can be created"
// +kubebuilder:printcolumn:name="Namespace count",type="integer",JSONPath=".status.size",description="The total amount of Namespaces in use"
// +kubebuilder:printcolumn:name="Node selector",type="string",JSONPath=".spec.nodeSelector",description="Node Selector applied to Pods"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age"

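Read as a whole, the hunk above replaces the old flat spec fields with grouped option blocks. A minimal sketch of a v1beta1 manifest using the regrouped fields; the tenant name "oil" and owner "alice" are hypothetical, while the field paths come from the struct tags above:

apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
    -
      kind: User
      name: alice
  namespaceOptions:      # formerly namespaceQuota and namespacesMetadata
    quota: 5
    additionalMetadata:
      labels:
        capsule.clastix.io/tenant: oil
  ingressOptions:        # formerly ingressClasses and ingressHostnames
    allowedClasses:
      allowed:
        - nginx
    allowedHostnames:
      allowedRegex: ^.*\.oil\.example\.com$
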
@@ -17,15 +17,15 @@ import (
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdditionalMetadataSpec) DeepCopyInto(out *AdditionalMetadataSpec) {
    *out = *in
    if in.AdditionalLabels != nil {
        in, out := &in.AdditionalLabels, &out.AdditionalLabels
    if in.Labels != nil {
        in, out := &in.Labels, &out.Labels
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
        }
    }
    if in.AdditionalAnnotations != nil {
        in, out := &in.AdditionalAnnotations, &out.AdditionalAnnotations
    if in.Annotations != nil {
        in, out := &in.Annotations, &out.Annotations
        *out = make(map[string]string, len(*in))
        for key, val := range *in {
            (*out)[key] = val
@@ -83,6 +83,52 @@ func (in *AllowedListSpec) DeepCopy() *AllowedListSpec {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllowedServices) DeepCopyInto(out *AllowedServices) {
    *out = *in
    if in.NodePort != nil {
        in, out := &in.NodePort, &out.NodePort
        *out = new(bool)
        **out = **in
    }
    if in.ExternalName != nil {
        in, out := &in.ExternalName, &out.ExternalName
        *out = new(bool)
        **out = **in
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedServices.
func (in *AllowedServices) DeepCopy() *AllowedServices {
    if in == nil {
        return nil
    }
    out := new(AllowedServices)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ByKindAndName) DeepCopyInto(out *ByKindAndName) {
    {
        in := &in
        *out = make(ByKindAndName, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByKindAndName.
func (in ByKindAndName) DeepCopy() ByKindAndName {
    if in == nil {
        return nil
    }
    out := new(ByKindAndName)
    in.DeepCopyInto(out)
    return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExternalServiceIPsSpec) DeepCopyInto(out *ExternalServiceIPsSpec) {
    *out = *in
@@ -103,6 +149,31 @@ func (in *ExternalServiceIPsSpec) DeepCopy() *ExternalServiceIPsSpec {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressOptions) DeepCopyInto(out *IngressOptions) {
    *out = *in
    if in.AllowedClasses != nil {
        in, out := &in.AllowedClasses, &out.AllowedClasses
        *out = new(AllowedListSpec)
        (*in).DeepCopyInto(*out)
    }
    if in.AllowedHostnames != nil {
        in, out := &in.AllowedHostnames, &out.AllowedHostnames
        *out = new(AllowedListSpec)
        (*in).DeepCopyInto(*out)
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressOptions.
func (in *IngressOptions) DeepCopy() *IngressOptions {
    if in == nil {
        return nil
    }
    out := new(IngressOptions)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LimitRangesSpec) DeepCopyInto(out *LimitRangesSpec) {
    *out = *in
@@ -125,6 +196,31 @@ func (in *LimitRangesSpec) DeepCopy() *LimitRangesSpec {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceOptions) DeepCopyInto(out *NamespaceOptions) {
    *out = *in
    if in.Quota != nil {
        in, out := &in.Quota, &out.Quota
        *out = new(int32)
        **out = **in
    }
    if in.AdditionalMetadata != nil {
        in, out := &in.AdditionalMetadata, &out.AdditionalMetadata
        *out = new(AdditionalMetadataSpec)
        (*in).DeepCopyInto(*out)
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceOptions.
func (in *NamespaceOptions) DeepCopy() *NamespaceOptions {
    if in == nil {
        return nil
    }
    out := new(NamespaceOptions)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) {
    *out = *in
@@ -147,6 +243,27 @@ func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in OwnerListSpec) DeepCopyInto(out *OwnerListSpec) {
    {
        in := &in
        *out = make(OwnerListSpec, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerListSpec.
func (in OwnerListSpec) DeepCopy() OwnerListSpec {
    if in == nil {
        return nil
    }
    out := new(OwnerListSpec)
    in.DeepCopyInto(out)
    return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OwnerSpec) DeepCopyInto(out *OwnerSpec) {
    *out = *in
@@ -211,6 +328,36 @@ func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceOptions) DeepCopyInto(out *ServiceOptions) {
    *out = *in
    if in.AdditionalMetadata != nil {
        in, out := &in.AdditionalMetadata, &out.AdditionalMetadata
        *out = new(AdditionalMetadataSpec)
        (*in).DeepCopyInto(*out)
    }
    if in.AllowedServices != nil {
        in, out := &in.AllowedServices, &out.AllowedServices
        *out = new(AllowedServices)
        (*in).DeepCopyInto(*out)
    }
    if in.ExternalServiceIPs != nil {
        in, out := &in.ExternalServiceIPs, &out.ExternalServiceIPs
        *out = new(ExternalServiceIPsSpec)
        (*in).DeepCopyInto(*out)
    }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceOptions.
func (in *ServiceOptions) DeepCopy() *ServiceOptions {
    if in == nil {
        return nil
    }
    out := new(ServiceOptions)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Tenant) DeepCopyInto(out *Tenant) {
    *out = *in
@@ -275,24 +422,19 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
    *out = *in
    if in.Owners != nil {
        in, out := &in.Owners, &out.Owners
        *out = make([]OwnerSpec, len(*in))
        *out = make(OwnerListSpec, len(*in))
        for i := range *in {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    if in.NamespaceQuota != nil {
        in, out := &in.NamespaceQuota, &out.NamespaceQuota
        *out = new(int32)
        **out = **in
    }
    if in.NamespacesMetadata != nil {
        in, out := &in.NamespacesMetadata, &out.NamespacesMetadata
        *out = new(AdditionalMetadataSpec)
    if in.NamespaceOptions != nil {
        in, out := &in.NamespaceOptions, &out.NamespaceOptions
        *out = new(NamespaceOptions)
        (*in).DeepCopyInto(*out)
    }
    if in.ServicesMetadata != nil {
        in, out := &in.ServicesMetadata, &out.ServicesMetadata
        *out = new(AdditionalMetadataSpec)
    if in.ServiceOptions != nil {
        in, out := &in.ServiceOptions, &out.ServiceOptions
        *out = new(ServiceOptions)
        (*in).DeepCopyInto(*out)
    }
    if in.StorageClasses != nil {
@@ -300,16 +442,7 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
        *out = new(AllowedListSpec)
        (*in).DeepCopyInto(*out)
    }
    if in.IngressClasses != nil {
        in, out := &in.IngressClasses, &out.IngressClasses
        *out = new(AllowedListSpec)
        (*in).DeepCopyInto(*out)
    }
    if in.IngressHostnames != nil {
        in, out := &in.IngressHostnames, &out.IngressHostnames
        *out = new(AllowedListSpec)
        (*in).DeepCopyInto(*out)
    }
    in.IngressOptions.DeepCopyInto(&out.IngressOptions)
    if in.ContainerRegistries != nil {
        in, out := &in.ContainerRegistries, &out.ContainerRegistries
        *out = new(AllowedListSpec)
@@ -344,11 +477,6 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    if in.ExternalServiceIPs != nil {
        in, out := &in.ExternalServiceIPs, &out.ExternalServiceIPs
        *out = new(ExternalServiceIPsSpec)
        (*in).DeepCopyInto(*out)
    }
    if in.ImagePullPolicies != nil {
        in, out := &in.ImagePullPolicies, &out.ImagePullPolicies
        *out = make([]ImagePullPolicySpec, len(*in))

@@ -24,19 +24,23 @@ The Capsule Operator Chart can be used to instantly deploy the Capsule Operator

        $ helm repo add clastix https://clastix.github.io/charts

2. Install the Chart:
2. Create the Namespace:

        $ kubectl create namespace capsule-system

3. Install the Chart:

        $ helm install capsule clastix/capsule -n capsule-system

3. Show the status:
4. Show the status:

        $ helm status capsule -n capsule-system

4. Upgrade the Chart
5. Upgrade the Chart

        $ helm upgrade capsule clastix/capsule -n capsule-system

5. Uninstall the Chart
6. Uninstall the Chart

        $ helm uninstall capsule -n capsule-system

@@ -65,8 +69,6 @@ Parameter | Description | Default
`manager.options.forceTenantPrefix` | Boolean, enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash | `false`
`manager.options.capsuleUserGroup` | Override the Capsule user group | `capsule.clastix.io`
`manager.options.protectedNamespaceRegex` | If specified, disallows creation of namespaces matching the passed regexp | `null`
`manager.options.allowIngressHostnameCollision` | Allow the Ingress hostname collision at Ingress resource level across all the Tenants | `true`
`manager.options.allowTenantIngressHostnamesCollision` | Skip the validation check at Tenant level for colliding Ingress hostnames | `false`
`manager.image.repository` | Set the image repository of the controller. | `quay.io/clastix/capsule`
`manager.image.tag` | Overrides the image tag, whose default is the chart `appVersion`. | `null`
`manager.image.pullPolicy` | Set the image pull policy. | `IfNotPresent`

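For the parameters that survive this change, a hypothetical override file (the name `my-values.yaml` is assumed, and the values are illustrative) could be passed to the install or upgrade commands above with `-f`:

manager:
  options:
    forceTenantPrefix: true
    protectedNamespaceRegex: "^(default|kube-.*)$"
  image:
    pullPolicy: Always

        $ helm upgrade capsule clastix/capsule -n capsule-system -f my-values.yaml
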
@@ -30,14 +30,8 @@ spec:
      spec:
        description: CapsuleConfigurationSpec defines the Capsule configuration
        properties:
          allowIngressHostnameCollision:
            default: true
            description: Allow the collision of Ingress resource hostnames across all the Tenants.
            type: boolean
          allowTenantIngressHostnamesCollision:
            description: "When defining the exact match for allowed Ingress hostnames at Tenant level, a collision is not allowed. Toggling this, Capsule will not check if a hostname collision is in place, allowing the creation of two or more Tenant resources although sharing the same allowed hostname(s). \n The JSON path of the resource is: /spec/ingressHostnames/allowed"
            type: boolean
          forceTenantPrefix:
            default: false
            description: Enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash. This is useful to avoid Namespace name collision in a public CaaS environment.
            type: boolean
          protectedNamespaceRegex:

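Assuming the trimmed-down schema above (the two ingress-hostname collision toggles are removed), a CapsuleConfiguration manifest now carries only the remaining options; this sketch mirrors the sample defaults later in this changeset, with the resource name assumed:

apiVersion: capsule.clastix.io/v1alpha1
kind: CapsuleConfiguration
metadata:
  name: default
spec:
  userGroups: ["capsule.clastix.io"]
  forceTenantPrefix: false
  protectedNamespaceRegex: ""
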
@@ -3,6 +3,7 @@ kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.5.0
  creationTimestamp: null
  name: tenants.capsule.clastix.io
spec:
  conversion:
@@ -221,11 +222,15 @@ spec:
  items:
    description: NetworkPolicyPort describes a port to allow traffic on
    properties:
      endPort:
        description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
        format: int32
        type: integer
      port:
        anyOf:
        - type: integer
        - type: string
        description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
        description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
        x-kubernetes-int-or-string: true
      protocol:
        default: TCP
@@ -407,11 +412,15 @@ spec:
  items:
    description: NetworkPolicyPort describes a port to allow traffic on
    properties:
      endPort:
        description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
        format: int32
        type: integer
      port:
        anyOf:
        - type: integer
        - type: string
        description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
        description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
        x-kubernetes-int-or-string: true
      protocol:
        default: TCP
@@ -452,9 +461,9 @@ spec:
      type: object
    type: object
  policyTypes:
    description: List of rule types that the NetworkPolicy relates to. Valid options are "Ingress", "Egress", or "Ingress,Egress". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
    description: List of rule types that the NetworkPolicy relates to. Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
    items:
      description: Policy Type string describes the NetworkPolicy type This type is beta-level in 1.8
      description: PolicyType string describes the NetworkPolicy type This type is beta-level in 1.8
      type: string
    type: array
  required:
@@ -564,14 +573,14 @@ spec:
    served: true
    storage: false
    subresources:
      status: { }
      status: {}
  - additionalPrinterColumns:
    - description: The actual state of the Tenant
      jsonPath: .status.state
      name: State
      type: string
    - description: The max amount of Namespaces that can be created
      jsonPath: .spec.namespaceQuota
      jsonPath: .spec.namespaceOptions.quota
      name: Namespace quota
      type: integer
    - description: The total amount of Namespaces in use
@@ -603,6 +612,7 @@ spec:
        description: TenantSpec defines the desired state of Tenant
        properties:
          additionalRoleBindings:
            description: Specifies additional RoleBindings assigned to the Tenant. Capsule will ensure that all namespaces in the Tenant always contain the RoleBinding for the given ClusterRole. Optional.
            items:
              properties:
                clusterRoleName:
@@ -635,6 +645,7 @@ spec:
              type: object
            type: array
          containerRegistries:
            description: Specifies the trusted Image Registries assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed trusted registries. Optional.
            properties:
              allowed:
                items:
@@ -643,20 +654,8 @@ spec:
              allowedRegex:
                type: string
            type: object
          enableNodePorts:
            default: true
            type: boolean
          externalServiceIPs:
            properties:
              allowed:
                items:
                  pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
                  type: string
                type: array
            required:
            - allowed
            type: object
          imagePullPolicies:
            description: Specify the allowed values for the imagePullPolicies option in Pod resources. Capsule assures that all Pod resources created in the Tenant can use only one of the allowed policies. Optional.
            items:
              enum:
              - Always
@@ -664,25 +663,41 @@ spec:
              - IfNotPresent
              type: string
            type: array
          ingressClasses:
          ingressOptions:
            description: Specifies options for the Ingress resources, such as allowed hostnames and IngressClass. Optional.
            properties:
              allowed:
                items:
                  type: string
                type: array
              allowedRegex:
                type: string
            type: object
          ingressHostnames:
            properties:
              allowed:
                items:
                  type: string
                type: array
              allowedRegex:
              allowedClasses:
                description: Specifies the allowed IngressClasses assigned to the Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed IngressClasses. Optional.
                properties:
                  allowed:
                    items:
                      type: string
                    type: array
                  allowedRegex:
                    type: string
                type: object
              allowedHostnames:
                description: Specifies the allowed hostnames in Ingresses for the given Tenant. Capsule assures that all Ingress resources created in the Tenant can use only one of the allowed hostnames. Optional.
                properties:
                  allowed:
                    items:
                      type: string
                    type: array
                  allowedRegex:
                    type: string
                type: object
              hostnameCollisionScope:
                default: Disabled
                description: "Defines the scope of hostname collision check performed when Tenant Owners create Ingress with allowed hostnames. \n - Cluster: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces managed by Capsule. \n - Tenant: disallow the creation of an Ingress if the pair hostname and path is already used across the Namespaces of the Tenant. \n - Namespace: disallow the creation of an Ingress if the pair hostname and path is already used in the Ingress Namespace. \n Optional."
                enum:
                - Cluster
                - Tenant
                - Namespace
                - Disabled
                type: string
            type: object
          limitRanges:
            description: Specifies the LimitRanges assigned to the Tenant. The assigned LimitRange resources are inherited by any namespace created in the Tenant. Optional.
            properties:
              items:
                items:
@@ -750,22 +765,29 @@ spec:
                type: object
              type: array
            type: object
          namespaceQuota:
            format: int32
            minimum: 1
            type: integer
          namespacesMetadata:
          namespaceOptions:
            description: Specifies options for the Namespaces, such as additional metadata or maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
            properties:
              additionalAnnotations:
                additionalProperties:
                  type: string
                type: object
              additionalLabels:
                additionalProperties:
                  type: string
              additionalMetadata:
                description: Specifies additional labels and annotations the Capsule operator places on any Namespace resource in the Tenant. Optional.
                properties:
                  annotations:
                    additionalProperties:
                      type: string
                    type: object
                  labels:
                    additionalProperties:
                      type: string
                    type: object
                type: object
              quota:
                description: Specifies the maximum number of namespaces allowed for that Tenant. Once the namespace quota assigned to the Tenant has been reached, the Tenant owner cannot create further namespaces. Optional.
                format: int32
                minimum: 1
                type: integer
            type: object
          networkPolicies:
            description: Specifies the NetworkPolicies assigned to the Tenant. The assigned NetworkPolicies are inherited by any namespace created in the Tenant. Optional.
            properties:
              items:
                items:
@@ -781,11 +803,15 @@ spec:
  items:
    description: NetworkPolicyPort describes a port to allow traffic on
    properties:
      endPort:
        description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
        format: int32
        type: integer
      port:
        anyOf:
        - type: integer
        - type: string
        description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
        description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
        x-kubernetes-int-or-string: true
      protocol:
        default: TCP
@@ -967,11 +993,15 @@ spec:
  items:
    description: NetworkPolicyPort describes a port to allow traffic on
    properties:
      endPort:
        description: If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate "NetworkPolicyEndPort".
        format: int32
        type: integer
      port:
        anyOf:
        - type: integer
        - type: string
        description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.
        description: The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.
        x-kubernetes-int-or-string: true
      protocol:
        default: TCP
@@ -1012,9 +1042,9 @@ spec:
      type: object
    type: object
  policyTypes:
    description: List of rule types that the NetworkPolicy relates to. Valid options are "Ingress", "Egress", or "Ingress,Egress". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
    description: List of rule types that the NetworkPolicy relates to. Valid options are ["Ingress"], ["Egress"], or ["Ingress", "Egress"]. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include "Egress" (since such a policy would not include an Egress section and would otherwise default to just [ "Ingress" ]). This field is beta-level in 1.8
    items:
      description: Policy Type string describes the NetworkPolicy type This type is beta-level in 1.8
      description: PolicyType string describes the NetworkPolicy type This type is beta-level in 1.8
      type: string
    type: array
  required:
@@ -1025,20 +1055,24 @@ spec:
          nodeSelector:
            additionalProperties:
              type: string
            description: Specifies the label to control the placement of pods on a given pool of worker nodes. All namespaces created within the Tenant will have the node selector annotation. This annotation tells the Kubernetes scheduler to place pods on the nodes having the selector label. Optional.
            type: object
          owners:
            description: Specifies the owners of the Tenant. Mandatory.
            items:
              description: OwnerSpec defines tenant owner name and kind
              properties:
                kind:
                  description: Kind of tenant owner. Possible values are "User", "Group", and "ServiceAccount"
                  enum:
                  - User
                  - Group
                  - ServiceAccount
                  type: string
                name:
                  description: Name of tenant owner.
                  type: string
                proxySettings:
                  description: Proxy settings for tenant owner.
                  items:
                    properties:
                      kind:
@@ -1046,6 +1080,7 @@ spec:
                        - Nodes
                        - StorageClasses
                        - IngressClasses
                        - PriorityClasses
                        type: string
                      operations:
                        items:
@@ -1066,6 +1101,7 @@ spec:
              type: object
            type: array
          priorityClasses:
            description: Specifies the allowed PriorityClasses assigned to the Tenant. Capsule assures that all Pods resources created in the Tenant can use only one of the allowed PriorityClasses. Optional.
            properties:
              allowed:
                items:
@@ -1075,6 +1111,7 @@ spec:
                type: string
            type: object
          resourceQuotas:
            description: Specifies a list of ResourceQuota resources assigned to the Tenant. The assigned values are inherited by any namespace created in the Tenant. The Capsule operator aggregates ResourceQuota at Tenant level, so that the hard quota is never crossed for the given Tenant. This permits the Tenant owner to consume resources in the Tenant regardless of the namespace. Optional.
            properties:
              items:
                items:
@@ -1122,19 +1159,55 @@ spec:
                type: array
              type: object
            type: array
              scope:
                default: Tenant
                description: Define if the Resource Budget should compute resource across all Namespaces in the Tenant or individually per Namespace. Default is Tenant
                enum:
                - Tenant
                - Namespace
                type: string
            type: object
          servicesMetadata:
          serviceOptions:
            description: Specifies options for the Service, such as additional metadata or block of certain type of Services. Optional.
            properties:
              additionalAnnotations:
                additionalProperties:
                  type: string
              additionalMetadata:
                description: Specifies additional labels and annotations the Capsule operator places on any Service resource in the Tenant. Optional.
                properties:
                  annotations:
                    additionalProperties:
                      type: string
                    type: object
                  labels:
                    additionalProperties:
                      type: string
                    type: object
                type: object
              additionalLabels:
                additionalProperties:
                  type: string
              allowedServices:
                description: Block or deny certain type of Services. Optional.
                properties:
                  externalName:
                    default: true
                    description: Specifies if ExternalName service type resources are allowed for the Tenant. Default is true. Optional.
                    type: boolean
                  nodePort:
                    default: true
                    description: Specifies if NodePort service type resources are allowed for the Tenant. Default is true. Optional.
                    type: boolean
                type: object
              externalIPs:
                description: Specifies the external IPs that can be used in Services with type ClusterIP. An empty list means no IPs are allowed. Optional.
                properties:
                  allowed:
                    items:
                      pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
                      type: string
                    type: array
                required:
                - allowed
                type: object
            type: object
          storageClasses:
            description: Specifies the allowed StorageClasses assigned to the Tenant. Capsule assures that all PersistentVolumeClaim resources created in the Tenant can use only one of the allowed StorageClasses. Optional.
            properties:
              allowed:
                items:
@@ -1147,19 +1220,22 @@ spec:
        - owners
        type: object
      status:
        description: TenantStatus defines the observed state of Tenant
        description: Returns the observed state of the Tenant
        properties:
          namespaces:
            description: List of namespaces assigned to the Tenant.
            items:
              type: string
            type: array
          size:
            description: How many namespaces are assigned to the Tenant.
            type: integer
          state:
            default: active
            default: Active
            description: The operational state of the Tenant. Possible values are "Active", "Cordoned".
            enum:
            - cordoned
            - active
            - Cordoned
            - Active
            type: string
        required:
        - size
@@ -1169,7 +1245,7 @@ spec:
    served: true
    storage: true
    subresources:
      status: { }
      status: {}
  status:
    acceptedNames:
      kind: ""

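The removed cluster-wide collision flags are superseded by the per-Tenant hostnameCollisionScope enum defined above; a fragment of a Tenant spec exercising it might look like this sketch (the hostname is illustrative):

spec:
  ingressOptions:
    hostnameCollisionScope: Tenant   # one of Cluster | Tenant | Namespace | Disabled
    allowedHostnames:
      allowed:
        - app.acmecorp.com
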
@@ -9,5 +9,3 @@ spec:
    - {{ . }}
  {{- end}}
  protectedNamespaceRegex: {{ .Values.manager.options.protectedNamespaceRegex | quote }}
  allowTenantIngressHostnamesCollision: {{ .Values.manager.options.allowTenantIngressHostnamesCollision }}
  allowIngressHostnameCollision: {{ .Values.manager.options.allowIngressHostnameCollision }}

@@ -21,8 +21,6 @@ manager:
    forceTenantPrefix: false
    capsuleUserGroups: ["capsule.clastix.io"]
    protectedNamespaceRegex: ""
    allowIngressHostnameCollision: true
    allowTenantIngressHostnamesCollision: false
  livenessProbe:
    httpGet:
      path: /healthz

@@ -30,15 +30,8 @@ spec:
      metadata:
        type: object
      spec:
        description: CapsuleConfigurationSpec defines the Capsule configuration nolint:maligned
        description: CapsuleConfigurationSpec defines the Capsule configuration
        properties:
          allowIngressHostnameCollision:
            default: true
            description: Allow the collision of Ingress resource hostnames across all the Tenants.
            type: boolean
          allowTenantIngressHostnamesCollision:
            description: "When defining the exact match for allowed Ingress hostnames at Tenant level, a collision is not allowed. Toggling this, Capsule will not check if a hostname collision is in place, allowing the creation of two or more Tenant resources although sharing the same allowed hostname(s). \n The JSON path of the resource is: /spec/ingressHostnames/allowed"
            type: boolean
          forceTenantPrefix:
            default: false
            description: Enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash. This is useful to avoid Namespace name collision in a public CaaS environment.

config/install.yaml (new file, 1673 lines): diff suppressed because it is too large.
@@ -6,5 +6,3 @@ spec:
  userGroups: ["capsule.clastix.io"]
  forceTenantPrefix: false
  protectedNamespaceRegex: ""
  allowTenantIngressHostnamesCollision: false
  allowIngressHostnameCollision: false

@@ -7,4 +7,4 @@ kind: Kustomization
images:
- name: controller
  newName: quay.io/clastix/capsule
  newTag: v0.1.0-rc2
  newTag: v0.1.0-rc5

@@ -7,5 +7,3 @@ spec:
|
||||
userGroups: ["capsule.clastix.io"]
|
||||
forceTenantPrefix: false
|
||||
protectedNamespaceRegex: ""
|
||||
allowTenantIngressHostnamesCollision: false
|
||||
allowIngressHostnameCollision: false
|
||||
|
||||
@@ -1,7 +1,139 @@
---
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: tenant-sample
  name: gas
spec:
  # Add fields here
  foo: bar
  additionalRoleBindings:
  -
    clusterRoleName: tenant-sample-viewer
    subjects:
    -
      kind: User
      name: bob
  containerRegistries:
    allowed:
    - docker.io
    - quay.io
    allowedRegex: ^\w+.gcr.io$
  serviceOptions:
    additionalMetadata:
      annotations:
        capsule.clastix.io/bgp: "true"
      labels:
        capsule.clastix.io/pool: gas
    allowedServices:
      nodePort: false
      externalName: false
    externalIPs:
      allowed:
      - 10.20.0.0/16
      - "10.96.42.42"
  imagePullPolicies:
  - Always
  ingressOptions:
    hostnameCollisionScope: Cluster
    allowedClasses:
      allowed:
      - default
      allowedRegex: ^\w+-lb$
    allowedHostnames:
      allowed:
      - gas.acmecorp.com
      allowedRegex: ^.*acmecorp.com$
  limitRanges:
    items:
    -
      limits:
      -
        max:
          cpu: "1"
          memory: 1Gi
        min:
          cpu: 50m
          memory: 5Mi
        type: Pod
      -
        default:
          cpu: 200m
          memory: 100Mi
        defaultRequest:
          cpu: 100m
          memory: 10Mi
        max:
          cpu: "1"
          memory: 1Gi
        min:
          cpu: 50m
          memory: 5Mi
        type: Container
      -
        max:
          storage: 10Gi
        min:
          storage: 1Gi
        type: PersistentVolumeClaim
  namespaceOptions:
    quota: 3
    additionalMetadata:
      annotations:
        capsule.clastix.io/backup: "false"
      labels:
        capsule.clastix.io/tenant: gas
  networkPolicies:
    items:
    -
      egress:
      -
        to:
        -
          ipBlock:
            cidr: 0.0.0.0/0
            except:
            - 192.168.0.0/12
      ingress:
      -
        from:
        -
          namespaceSelector:
            matchLabels:
              capsule.clastix.io/tenant: gas
        -
          podSelector: {}
        -
          ipBlock:
            cidr: 192.168.0.0/12
      podSelector: {}
      policyTypes:
      - Ingress
      - Egress
  nodeSelector:
    kubernetes.io/os: linux
  owners:
  -
    kind: User
    name: bob
  priorityClasses:
    allowed:
    - shared-nodes
    allowedRegex: ^\w-gas$
  resourceQuotas:
    items:
    -
      hard:
        limits.cpu: "8"
        limits.memory: 16Gi
        requests.cpu: "8"
        requests.memory: 16Gi
      scopes:
      - NotTerminating
    -
      hard:
        pods: "10"
    -
      hard:
        requests.storage: 100Gi
  storageClasses:
    allowed:
    - default
    allowedRegex: ^\w+fs$
@@ -2,3 +2,4 @@
resources:
- capsule_v1alpha1_capsuleconfiguration.yaml
- capsule_v1alpha1_tenant.yaml
- capsule_v1beta1_tenant.yaml
@@ -1,7 +1,7 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package rbac
package config

import (
  "context"
@@ -25,8 +25,8 @@ type Manager struct {
}

// InjectClient injects the Client interface, required by the Runnable interface
func (r *Manager) InjectClient(c client.Client) error {
  r.Client = c
func (c *Manager) InjectClient(client client.Client) error {
  c.Client = client

  return nil
}
@@ -52,22 +52,22 @@ func forOptionPerInstanceName(instanceName string) builder.ForOption {
  })
}

func (r *Manager) SetupWithManager(mgr ctrl.Manager, configurationName string) error {
func (c *Manager) SetupWithManager(mgr ctrl.Manager, configurationName string) error {
  return ctrl.NewControllerManagedBy(mgr).
    For(&capsulev1alpha1.CapsuleConfiguration{}, forOptionPerInstanceName(configurationName)).
    Complete(r)
    Complete(c)
}

func (r *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, err error) {
  r.Log.Info("CapsuleConfiguration reconciliation started", "request.name", request.Name)
func (c *Manager) Reconcile(ctx context.Context, request reconcile.Request) (res reconcile.Result, err error) {
  c.Log.Info("CapsuleConfiguration reconciliation started", "request.name", request.Name)

  cfg := configuration.NewCapsuleConfiguration(r.Client, request.Name)
  cfg := configuration.NewCapsuleConfiguration(c.Client, request.Name)
  // Validating the Capsule Configuration options
  if _, err = cfg.ProtectedNamespaceRegexp(); err != nil {
    panic(errors.Wrap(err, "Invalid configuration for protected Namespace regex"))
  }

  r.Log.Info("CapsuleConfiguration reconciliation finished", "request.name", request.Name)
  c.Log.Info("CapsuleConfiguration reconciliation finished", "request.name", request.Name)

  return
}
@@ -157,7 +157,7 @@ func (r CAReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl
    privateKeySecretKey: key.Bytes(),
  }

  group := errgroup.Group{}
  group := new(errgroup.Group)
  group.Go(func() error {
    return r.UpdateMutatingWebhookConfiguration(crt.Bytes())
  })
@@ -53,8 +53,8 @@ func (r *abstractServiceLabelsReconciler) Reconcile(ctx context.Context, request
  }

  _, err = controllerutil.CreateOrUpdate(ctx, r.client, r.obj, func() (err error) {
    r.obj.SetLabels(r.sync(r.obj.GetLabels(), tenant.Spec.ServicesMetadata.AdditionalLabels))
    r.obj.SetAnnotations(r.sync(r.obj.GetAnnotations(), tenant.Spec.ServicesMetadata.AdditionalAnnotations))
    r.obj.SetLabels(r.sync(r.obj.GetLabels(), tenant.Spec.ServiceOptions.AdditionalMetadata.Labels))
    r.obj.SetAnnotations(r.sync(r.obj.GetAnnotations(), tenant.Spec.ServiceOptions.AdditionalMetadata.Annotations))
    return nil
  })

@@ -78,7 +78,7 @@ func (r *abstractServiceLabelsReconciler) getTenant(ctx context.Context, namespa
    return nil, err
  }

  if tenant.Spec.ServicesMetadata == nil {
  if tenant.Spec.ServiceOptions == nil || tenant.Spec.ServiceOptions.AdditionalMetadata == nil {
    return nil, NewNoServicesMetadata(namespacedName.Name)
  }

@@ -5,6 +5,7 @@ package servicelabels

import (
  "github.com/go-logr/logr"
  discoveryv1 "k8s.io/api/discovery/v1"
  discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
  ctrl "sigs.k8s.io/controller-runtime"
)
@@ -24,12 +25,16 @@ func (r *EndpointSlicesLabelsReconciler) SetupWithManager(mgr ctrl.Manager) erro
    log: r.Log,
  }

  if r.VersionMajor == 1 && r.VersionMinor <= 16 {
  switch {
  case r.VersionMajor == 1 && r.VersionMinor <= 16:
    r.Log.Info("Skipping controller setup, as EndpointSlices are not supported on current kubernetes version", "VersionMajor", r.VersionMajor, "VersionMinor", r.VersionMinor)
    return nil
  case r.VersionMajor == 1 && r.VersionMinor >= 21:
    r.abstractServiceLabelsReconciler.obj = &discoveryv1.EndpointSlice{}
  default:
    r.abstractServiceLabelsReconciler.obj = &discoveryv1beta1.EndpointSlice{}
  }

  r.abstractServiceLabelsReconciler.obj = &discoveryv1beta1.EndpointSlice{}
  return ctrl.NewControllerManagedBy(mgr).
    For(r.obj, r.abstractServiceLabelsReconciler.forOptionPerInstanceName()).
    Complete(r)
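Editor's note: the switch above gates the EndpointSlice API group on the server version. A minimal sketch of the same decision in isolation, assuming major and minor were already discovered elsewhere (endpointSliceObject is a hypothetical helper name, not part of Capsule):

package sketch

import (
  "fmt"

  discoveryv1 "k8s.io/api/discovery/v1"
  discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
  "sigs.k8s.io/controller-runtime/pkg/client"
)

// endpointSliceObject picks the EndpointSlice type served by the cluster:
// none before 1.17, discovery.k8s.io/v1beta1 up to 1.20, discovery.k8s.io/v1 from 1.21.
func endpointSliceObject(major, minor uint) (client.Object, error) {
  switch {
  case major == 1 && minor <= 16:
    return nil, fmt.Errorf("EndpointSlice is not served on %d.%d", major, minor)
  case major == 1 && minor >= 21:
    return &discoveryv1.EndpointSlice{}, nil
  default:
    return &discoveryv1beta1.EndpointSlice{}, nil
  }
}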
80
controllers/tenant/limitranges.go
Normal file
@@ -0,0 +1,80 @@
package tenant

import (
  "context"
  "fmt"
  "strconv"

  "golang.org/x/sync/errgroup"
  corev1 "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

  capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

// Ensuring all the LimitRange are applied to each Namespace handled by the Tenant.
func (r *Manager) syncLimitRanges(tenant *capsulev1beta1.Tenant) error {
  // getting requested LimitRange keys
  keys := make([]string, 0, len(tenant.Spec.LimitRanges.Items))

  for i := range tenant.Spec.LimitRanges.Items {
    keys = append(keys, strconv.Itoa(i))
  }

  group := new(errgroup.Group)

  for _, ns := range tenant.Status.Namespaces {
    namespace := ns

    group.Go(func() error {
      return r.syncLimitRange(tenant, namespace, keys)
    })
  }

  return group.Wait()
}

func (r *Manager) syncLimitRange(tenant *capsulev1beta1.Tenant, namespace string, keys []string) (err error) {
  // getting LimitRange labels for the mutateFn
  var tenantLabel, limitRangeLabel string

  if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
    return
  }
  if limitRangeLabel, err = capsulev1beta1.GetTypeLabel(&corev1.LimitRange{}); err != nil {
    return
  }

  if err = r.pruningResources(namespace, keys, &corev1.LimitRange{}); err != nil {
    return
  }

  for i, spec := range tenant.Spec.LimitRanges.Items {
    target := &corev1.LimitRange{
      ObjectMeta: metav1.ObjectMeta{
        Name: fmt.Sprintf("capsule-%s-%d", tenant.Name, i),
        Namespace: namespace,
      },
    }

    var res controllerutil.OperationResult
    res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
      target.ObjectMeta.Labels = map[string]string{
        tenantLabel: tenant.Name,
        limitRangeLabel: strconv.Itoa(i),
      }
      target.Spec = spec
      return controllerutil.SetControllerReference(tenant, target, r.Scheme)
    })

    r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring LimitRange %s", target.GetName()), err)

    r.Log.Info("LimitRange sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
    if err != nil {
      return
    }
  }

  return
}
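Editor's note: syncLimitRanges fans out one goroutine per Namespace. A minimal sketch of that idiom on its own (fanOut and syncOne are illustrative names only); the copy of the loop variable before capture was required before Go 1.22 re-scoped loop iterators:

package sketch

import "golang.org/x/sync/errgroup"

// fanOut runs syncOne concurrently for every Namespace and returns the
// first error reported by any goroutine, exactly what errgroup.Wait does
// in the controller above.
func fanOut(namespaces []string, syncOne func(ns string) error) error {
  group := new(errgroup.Group)

  for _, ns := range namespaces {
    namespace := ns // copy: the goroutine must not share the loop iterator

    group.Go(func() error {
      return syncOne(namespace)
    })
  }

  return group.Wait()
}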
127
controllers/tenant/manager.go
Normal file
@@ -0,0 +1,127 @@
package tenant

import (
  "context"

  "github.com/go-logr/logr"
  corev1 "k8s.io/api/core/v1"
  networkingv1 "k8s.io/api/networking/v1"
  rbacv1 "k8s.io/api/rbac/v1"
  "k8s.io/apimachinery/pkg/api/errors"
  "k8s.io/apimachinery/pkg/runtime"
  "k8s.io/client-go/tools/record"
  "k8s.io/client-go/util/retry"
  ctrl "sigs.k8s.io/controller-runtime"
  "sigs.k8s.io/controller-runtime/pkg/client"
  "sigs.k8s.io/controller-runtime/pkg/reconcile"

  capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

type Manager struct {
  client.Client
  Log logr.Logger
  Scheme *runtime.Scheme
  Recorder record.EventRecorder
}

func (r *Manager) SetupWithManager(mgr ctrl.Manager) error {
  return ctrl.NewControllerManagedBy(mgr).
    For(&capsulev1beta1.Tenant{}).
    Owns(&corev1.Namespace{}).
    Owns(&networkingv1.NetworkPolicy{}).
    Owns(&corev1.LimitRange{}).
    Owns(&corev1.ResourceQuota{}).
    Owns(&rbacv1.RoleBinding{}).
    Complete(r)
}

func (r Manager) Reconcile(ctx context.Context, request ctrl.Request) (result ctrl.Result, err error) {
  r.Log = r.Log.WithValues("Request.Name", request.Name)

  // Fetch the Tenant instance
  instance := &capsulev1beta1.Tenant{}
  if err = r.Get(ctx, request.NamespacedName, instance); err != nil {
    if errors.IsNotFound(err) {
      r.Log.Info("Request object not found, could have been deleted after reconcile request")
      return reconcile.Result{}, nil
    }
    r.Log.Error(err, "Error reading the object")
    return
  }
  // Ensuring the Tenant Status
  if err = r.updateTenantStatus(instance); err != nil {
    r.Log.Error(err, "Cannot update Tenant status")
    return
  }

  // Ensuring all namespaces are collected
  r.Log.Info("Ensuring all Namespaces are collected")
  if err = r.collectNamespaces(instance); err != nil {
    r.Log.Error(err, "Cannot collect Namespace resources")
    return
  }

  r.Log.Info("Starting processing of Namespaces", "items", len(instance.Status.Namespaces))
  if err = r.syncNamespaces(instance); err != nil {
    r.Log.Error(err, "Cannot sync Namespace items")
    return
  }

  if instance.Spec.NetworkPolicies != nil {
    r.Log.Info("Starting processing of Network Policies", "items", len(instance.Spec.NetworkPolicies.Items))
    if err = r.syncNetworkPolicies(instance); err != nil {
      r.Log.Error(err, "Cannot sync NetworkPolicy items")
      return
    }
  }

  if instance.Spec.LimitRanges != nil {
    r.Log.Info("Starting processing of Limit Ranges", "items", len(instance.Spec.LimitRanges.Items))
    if err = r.syncLimitRanges(instance); err != nil {
      r.Log.Error(err, "Cannot sync LimitRange items")
      return
    }
  }

  if instance.Spec.ResourceQuota != nil {
    r.Log.Info("Starting processing of Resource Quotas", "items", len(instance.Spec.ResourceQuota.Items))
    if err = r.syncResourceQuotas(instance); err != nil {
      r.Log.Error(err, "Cannot sync ResourceQuota items")
      return
    }
  }

  r.Log.Info("Ensuring additional RoleBindings for owner")
  if err = r.syncAdditionalRoleBindings(instance); err != nil {
    r.Log.Error(err, "Cannot sync additional RoleBindings items")
    return
  }

  r.Log.Info("Ensuring RoleBinding for owner")
  if err = r.ownerRoleBinding(instance); err != nil {
    r.Log.Error(err, "Cannot sync owner RoleBinding")
    return
  }

  r.Log.Info("Ensuring Namespace count")
  if err = r.ensureNamespaceCount(instance); err != nil {
    r.Log.Error(err, "Cannot sync Namespace count")
    return
  }

  r.Log.Info("Tenant reconciling completed")
  return ctrl.Result{}, err
}

func (r *Manager) updateTenantStatus(tnt *capsulev1beta1.Tenant) error {
  return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
    if tnt.IsCordoned() {
      tnt.Status.State = capsulev1beta1.TenantStateCordoned
    } else {
      tnt.Status.State = capsulev1beta1.TenantStateActive
    }

    return r.Client.Status().Update(context.Background(), tnt)
  })
}
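Editor's note: updateTenantStatus wraps its write in retry.RetryOnConflict so a 409 from the API server simply re-runs the closure. A minimal sketch of the same idiom applied to an arbitrary object (bumpAnnotation is an illustrative helper, not Capsule code); the re-read inside the closure is what makes each retry act on a fresh resourceVersion:

package sketch

import (
  "context"

  "k8s.io/client-go/util/retry"
  "sigs.k8s.io/controller-runtime/pkg/client"
)

// bumpAnnotation re-reads the object on every attempt, mutates it, and
// writes it back, retrying with the default backoff on 409 Conflict.
func bumpAnnotation(c client.Client, obj client.Object, key, value string) error {
  return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
    if err := c.Get(context.TODO(), client.ObjectKeyFromObject(obj), obj); err != nil {
      return err
    }

    annotations := obj.GetAnnotations()
    if annotations == nil {
      annotations = map[string]string{}
    }
    annotations[key] = value
    obj.SetAnnotations(annotations)

    return c.Update(context.TODO(), obj)
  })
}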
153
controllers/tenant/namespaces.go
Normal file
@@ -0,0 +1,153 @@
package tenant

import (
  "context"
  "fmt"
  "strings"

  "golang.org/x/sync/errgroup"
  corev1 "k8s.io/api/core/v1"
  "k8s.io/apimachinery/pkg/fields"
  "k8s.io/apimachinery/pkg/types"
  "k8s.io/client-go/util/retry"
  "sigs.k8s.io/controller-runtime/pkg/client"
  "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

  capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

// Ensuring all annotations are applied to each Namespace handled by the Tenant.
func (r *Manager) syncNamespaces(tenant *capsulev1beta1.Tenant) (err error) {
  group := new(errgroup.Group)

  for _, item := range tenant.Status.Namespaces {
    namespace := item

    group.Go(func() error {
      return r.syncNamespaceMetadata(namespace, tenant)
    })
  }

  if err = group.Wait(); err != nil {
    r.Log.Error(err, "Cannot sync Namespaces")

    err = fmt.Errorf("cannot sync Namespaces: %s", err.Error())
  }
  return
}

func (r *Manager) syncNamespaceMetadata(namespace string, tnt *capsulev1beta1.Tenant) (err error) {
  var res controllerutil.OperationResult

  err = retry.RetryOnConflict(retry.DefaultBackoff, func() (conflictErr error) {
    ns := &corev1.Namespace{}
    // note: the Get error must be checked on conflictErr, not on the outer err
    if conflictErr = r.Client.Get(context.TODO(), types.NamespacedName{Name: namespace}, ns); conflictErr != nil {
      return
    }

    capsuleLabel, _ := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})

    res, conflictErr = controllerutil.CreateOrUpdate(context.TODO(), r.Client, ns, func() error {
      annotations := make(map[string]string)

      if tnt.Spec.NamespaceOptions != nil && tnt.Spec.NamespaceOptions.AdditionalMetadata != nil {
        for k, v := range tnt.Spec.NamespaceOptions.AdditionalMetadata.Annotations {
          annotations[k] = v
        }
      }

      if tnt.Spec.NodeSelector != nil {
        var selector []string
        for k, v := range tnt.Spec.NodeSelector {
          selector = append(selector, fmt.Sprintf("%s=%s", k, v))
        }
        annotations["scheduler.alpha.kubernetes.io/node-selector"] = strings.Join(selector, ",")
      }

      if tnt.Spec.IngressOptions.AllowedClasses != nil {
        if len(tnt.Spec.IngressOptions.AllowedClasses.Exact) > 0 {
          annotations[capsulev1beta1.AvailableIngressClassesAnnotation] = strings.Join(tnt.Spec.IngressOptions.AllowedClasses.Exact, ",")
        }
        if len(tnt.Spec.IngressOptions.AllowedClasses.Regex) > 0 {
          annotations[capsulev1beta1.AvailableIngressClassesRegexpAnnotation] = tnt.Spec.IngressOptions.AllowedClasses.Regex
        }
      }

      if tnt.Spec.StorageClasses != nil {
        if len(tnt.Spec.StorageClasses.Exact) > 0 {
          annotations[capsulev1beta1.AvailableStorageClassesAnnotation] = strings.Join(tnt.Spec.StorageClasses.Exact, ",")
        }
        if len(tnt.Spec.StorageClasses.Regex) > 0 {
          annotations[capsulev1beta1.AvailableStorageClassesRegexpAnnotation] = tnt.Spec.StorageClasses.Regex
        }
      }

      if tnt.Spec.ContainerRegistries != nil {
        if len(tnt.Spec.ContainerRegistries.Exact) > 0 {
          annotations[capsulev1beta1.AllowedRegistriesAnnotation] = strings.Join(tnt.Spec.ContainerRegistries.Exact, ",")
        }
        if len(tnt.Spec.ContainerRegistries.Regex) > 0 {
          annotations[capsulev1beta1.AllowedRegistriesRegexpAnnotation] = tnt.Spec.ContainerRegistries.Regex
        }
      }

      ns.SetAnnotations(annotations)

      newLabels := map[string]string{
        "name": namespace,
        capsuleLabel: tnt.GetName(),
      }

      if tnt.Spec.NamespaceOptions != nil && tnt.Spec.NamespaceOptions.AdditionalMetadata != nil {
        for k, v := range tnt.Spec.NamespaceOptions.AdditionalMetadata.Labels {
          newLabels[k] = v
        }
      }

      ns.SetLabels(newLabels)

      return nil
    })

    return
  })

  r.emitEvent(tnt, namespace, res, "Ensuring Namespace metadata", err)

  return
}

func (r *Manager) ensureNamespaceCount(tenant *capsulev1beta1.Tenant) error {
  return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
    tenant.Status.Size = uint(len(tenant.Status.Namespaces))

    found := &capsulev1beta1.Tenant{}
    if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: tenant.GetName()}, found); err != nil {
      return err
    }

    found.Status.Size = tenant.Status.Size

    return r.Client.Status().Update(context.TODO(), found, &client.UpdateOptions{})
  })
}

func (r *Manager) collectNamespaces(tenant *capsulev1beta1.Tenant) error {
  return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
    list := &corev1.NamespaceList{}
    err = r.Client.List(context.TODO(), list, client.MatchingFieldsSelector{
      Selector: fields.OneTermEqualSelector(".metadata.ownerReferences[*].capsule", tenant.GetName()),
    })

    if err != nil {
      return
    }

    _, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, tenant.DeepCopy(), func() error {
      tenant.AssignNamespaces(list.Items)

      return r.Client.Status().Update(context.TODO(), tenant, &client.UpdateOptions{})
    })
    return
  })
}
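Editor's note: syncNamespaceMetadata flattens tnt.Spec.NodeSelector into the scheduler.alpha.kubernetes.io/node-selector annotation consumed by the PodNodeSelector admission plugin. A minimal sketch of just that flattening; sorting the keys is an addition here for a deterministic result, since the controller above joins in Go's random map iteration order:

package sketch

import (
  "fmt"
  "sort"
  "strings"
)

// nodeSelectorAnnotation renders {"kubernetes.io/os": "linux"} as
// "kubernetes.io/os=linux", joining multiple pairs with commas.
func nodeSelectorAnnotation(selector map[string]string) string {
  keys := make([]string, 0, len(selector))
  for k := range selector {
    keys = append(keys, k)
  }
  sort.Strings(keys)

  pairs := make([]string, 0, len(keys))
  for _, k := range keys {
    pairs = append(pairs, fmt.Sprintf("%s=%s", k, selector[k]))
  }

  return strings.Join(pairs, ",")
}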
82
controllers/tenant/networkpolicies.go
Normal file
@@ -0,0 +1,82 @@
package tenant

import (
  "context"
  "fmt"
  "strconv"

  "golang.org/x/sync/errgroup"
  networkingv1 "k8s.io/api/networking/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

  capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

// Ensuring all the NetworkPolicies are applied to each Namespace handled by the Tenant.
func (r *Manager) syncNetworkPolicies(tenant *capsulev1beta1.Tenant) error {
  // getting requested NetworkPolicy keys
  keys := make([]string, 0, len(tenant.Spec.NetworkPolicies.Items))

  for i := range tenant.Spec.NetworkPolicies.Items {
    keys = append(keys, strconv.Itoa(i))
  }

  group := new(errgroup.Group)

  for _, ns := range tenant.Status.Namespaces {
    namespace := ns

    group.Go(func() error {
      return r.syncNetworkPolicy(tenant, namespace, keys)
    })
  }

  return group.Wait()
}

func (r *Manager) syncNetworkPolicy(tenant *capsulev1beta1.Tenant, namespace string, keys []string) (err error) {
  if err = r.pruningResources(namespace, keys, &networkingv1.NetworkPolicy{}); err != nil {
    return
  }
  // getting NetworkPolicy labels for the mutateFn
  var tenantLabel, networkPolicyLabel string

  if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
    return
  }

  if networkPolicyLabel, err = capsulev1beta1.GetTypeLabel(&networkingv1.NetworkPolicy{}); err != nil {
    return
  }

  for i, spec := range tenant.Spec.NetworkPolicies.Items {
    target := &networkingv1.NetworkPolicy{
      ObjectMeta: metav1.ObjectMeta{
        Name: fmt.Sprintf("capsule-%s-%d", tenant.Name, i),
        Namespace: namespace,
      },
    }

    var res controllerutil.OperationResult
    res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
      target.SetLabels(map[string]string{
        tenantLabel: tenant.Name,
        networkPolicyLabel: strconv.Itoa(i),
      })
      target.Spec = spec

      return controllerutil.SetControllerReference(tenant, target, r.Scheme)
    })

    r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring NetworkPolicy %s", target.GetName()), err)

    r.Log.Info("Network Policy sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)

    if err != nil {
      return
    }
  }

  return
}
251
controllers/tenant/resourcequotas.go
Normal file
@@ -0,0 +1,251 @@
package tenant

import (
  "context"
  "fmt"
  "strconv"

  "golang.org/x/sync/errgroup"
  corev1 "k8s.io/api/core/v1"
  "k8s.io/apimachinery/pkg/api/resource"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/labels"
  "k8s.io/apimachinery/pkg/selection"
  "k8s.io/apimachinery/pkg/types"
  "k8s.io/client-go/util/retry"
  "sigs.k8s.io/controller-runtime/pkg/client"
  "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

  capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

// When the Resource Budget assigned to a Tenant is Tenant-scoped we have to rely on the ResourceQuota resources to
// represent the resource quota for the single Tenant rather than the single Namespace,
// thus abusing this API a bit despite its Namespaced scope.
//
// Since a single Namespace could take up all the available resource quota, the Namespace ResourceQuota is a 1:1 mapping
// to the Tenant one: first, Capsule sums all the analogous ResourceQuota resources on the other Tenant
// namespaces to check whether the Tenant quota has been exceeded, reusing the native Kubernetes policy by putting the
// .Status.Used value in place as the .Hard value.
// This triggers further reconciliations, but that's fine: the mutateFn reuses the same business logic, so the
// mutateFn along with CreateOrUpdate performs no update when the resources are identical.
//
// In case of a Namespace-scoped Resource Budget, we're just replicating the resources across all registered Namespaces.
func (r *Manager) syncResourceQuotas(tenant *capsulev1beta1.Tenant) (err error) {
  // getting ResourceQuota labels for the mutateFn
  var tenantLabel, typeLabel string

  if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
    return err
  }

  if typeLabel, err = capsulev1beta1.GetTypeLabel(&corev1.ResourceQuota{}); err != nil {
    return err
  }

  if tenant.Spec.ResourceQuota.Scope == capsulev1beta1.ResourceQuotaScopeTenant {
    group := new(errgroup.Group)

    for i, q := range tenant.Spec.ResourceQuota.Items {
      index := i

      resourceQuota := q

      group.Go(func() (scopeErr error) {
        // Calculating the Resource Budget at Tenant scope only when this is put in place.
        // Requirement to list ResourceQuota of the current Tenant
        var tntRequirement *labels.Requirement
        if tntRequirement, scopeErr = labels.NewRequirement(tenantLabel, selection.Equals, []string{tenant.Name}); scopeErr != nil {
          r.Log.Error(scopeErr, "Cannot build ResourceQuota Tenant requirement")
        }
        // Requirement to list ResourceQuota for the current index
        var indexRequirement *labels.Requirement
        if indexRequirement, scopeErr = labels.NewRequirement(typeLabel, selection.Equals, []string{strconv.Itoa(index)}); scopeErr != nil {
          r.Log.Error(scopeErr, "Cannot build ResourceQuota index requirement")
        }
        // Listing all the ResourceQuota according to the said requirements.
        // These are required since Capsule is going to sum all the used quota
        // across Namespaces to get the Tenant-wide one.
        list := &corev1.ResourceQuotaList{}
        if scopeErr = r.List(context.TODO(), list, &client.ListOptions{LabelSelector: labels.NewSelector().Add(*tntRequirement).Add(*indexRequirement)}); scopeErr != nil {
          r.Log.Error(scopeErr, "Cannot list ResourceQuota", "tenantFilter", tntRequirement.String(), "indexFilter", indexRequirement.String())
          return
        }
        // Iterating over all the options declared for the ResourceQuota,
        // summing all the used quota across different Namespaces to determine
        // if we're hitting a Hard quota at Tenant level.
        // In that case, we're going to block the Quota setting the Hard as the
        // used one.
        for name, hardQuota := range resourceQuota.Hard {
          r.Log.Info("Desired hard " + name.String() + " quota is " + hardQuota.String())

          // Getting the whole usage across all the Tenant Namespaces
          var quantity resource.Quantity
          for _, item := range list.Items {
            quantity.Add(item.Status.Used[name])
          }
          r.Log.Info("Computed " + name.String() + " quota for the whole Tenant is " + quantity.String())

          switch quantity.Cmp(resourceQuota.Hard[name]) {
          case 0:
            // The Tenant is matching exactly the Quota:
            // falling through to the next case since we have to block further
            // resource allocations.
            fallthrough
          case 1:
            // The Tenant is OverQuota:
            // updating all the related ResourceQuota with the current
            // used Quota to block further creations.
            for item := range list.Items {
              if _, ok := list.Items[item].Status.Used[name]; ok {
                list.Items[item].Spec.Hard[name] = list.Items[item].Status.Used[name]
              } else {
                um := make(map[corev1.ResourceName]resource.Quantity)
                um[name] = resource.Quantity{}
                list.Items[item].Spec.Hard = um
              }
            }
          default:
            // The Tenant is respecting the Hard quota:
            // restoring the default one for all the elements,
            // also for the reconciled one.
            for item := range list.Items {
              if list.Items[item].Spec.Hard == nil {
                list.Items[item].Spec.Hard = map[corev1.ResourceName]resource.Quantity{}
              }
              list.Items[item].Spec.Hard[name] = resourceQuota.Hard[name]
            }
          }
          if scopeErr = r.resourceQuotasUpdate(name, quantity, resourceQuota.Hard[name], list.Items...); scopeErr != nil {
            r.Log.Error(scopeErr, "cannot proceed with outer ResourceQuota")
            return
          }
        }
        return
      })
    }
    // Waiting for the update of all ResourceQuotas
    if err = group.Wait(); err != nil {
      return
    }
  }
  // getting requested ResourceQuota keys
  keys := make([]string, 0, len(tenant.Spec.ResourceQuota.Items))

  for i := range tenant.Spec.ResourceQuota.Items {
    keys = append(keys, strconv.Itoa(i))
  }

  group := new(errgroup.Group)

  for _, ns := range tenant.Status.Namespaces {
    namespace := ns

    group.Go(func() error {
      return r.syncResourceQuota(tenant, namespace, keys)
    })
  }

  return group.Wait()
}

func (r *Manager) syncResourceQuota(tenant *capsulev1beta1.Tenant, namespace string, keys []string) (err error) {
  // getting ResourceQuota labels for the mutateFn
  var tenantLabel, typeLabel string

  if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
    return err
  }

  if typeLabel, err = capsulev1beta1.GetTypeLabel(&corev1.ResourceQuota{}); err != nil {
    return err
  }
  // Pruning the non-requested resources
  if err = r.pruningResources(namespace, keys, &corev1.ResourceQuota{}); err != nil {
    return err
  }

  for index, resQuota := range tenant.Spec.ResourceQuota.Items {
    target := &corev1.ResourceQuota{
      ObjectMeta: metav1.ObjectMeta{
        Name: fmt.Sprintf("capsule-%s-%d", tenant.Name, index),
        Namespace: namespace,
      },
    }

    var res controllerutil.OperationResult
    err = retry.RetryOnConflict(retry.DefaultBackoff, func() (retryErr error) {
      res, retryErr = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
        target.SetLabels(map[string]string{
          tenantLabel: tenant.Name,
          typeLabel: strconv.Itoa(index),
        })
        target.Spec.Scopes = resQuota.Scopes
        target.Spec.ScopeSelector = resQuota.ScopeSelector
        // In case of Namespace scope for the ResourceQuota we can simply apply the bare specification
        if tenant.Spec.ResourceQuota.Scope == capsulev1beta1.ResourceQuotaScopeNamespace {
          target.Spec.Hard = resQuota.Hard
        }

        return controllerutil.SetControllerReference(tenant, target, r.Scheme)
      })

      return retryErr
    })

    r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring ResourceQuota %s", target.GetName()), err)

    r.Log.Info("Resource Quota sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)

    if err != nil {
      return
    }
  }

  return nil
}

// Serial ResourceQuota processing is expensive: using Go routines we can speed it up.
// In case of multiple errors these are logged properly, returning a generic error since we have to push back the
// reconciliation loop.
func (r *Manager) resourceQuotasUpdate(resourceName corev1.ResourceName, actual, limit resource.Quantity, list ...corev1.ResourceQuota) (err error) {
  group := new(errgroup.Group)

  for _, item := range list {
    rq := item

    group.Go(func() (err error) {
      found := &corev1.ResourceQuota{}
      if err = r.Get(context.TODO(), types.NamespacedName{Namespace: rq.Namespace, Name: rq.Name}, found); err != nil {
        return
      }

      return retry.RetryOnConflict(retry.DefaultBackoff, func() (retryErr error) {
        _, retryErr = controllerutil.CreateOrUpdate(context.TODO(), r.Client, found, func() error {
          // Ensuring the annotation map is there to avoid uninitialized map errors and
          // assigning the overall usage
          if found.Annotations == nil {
            found.Annotations = make(map[string]string)
          }
          found.Labels = rq.Labels
          found.Annotations[capsulev1beta1.UsedQuotaFor(resourceName)] = actual.String()
          found.Annotations[capsulev1beta1.HardQuotaFor(resourceName)] = limit.String()
          // Updating the Resource according to the actual.Cmp result
          found.Spec.Hard = rq.Spec.Hard
          return nil
        })

        return retryErr
      })
    })
  }

  if err = group.Wait(); err != nil {
    // We had an error and we mark the whole transaction as failed
    // to process it another time according to the Tenant controller back-off factor.
    r.Log.Error(err, "Cannot update outer ResourceQuotas", "resourceName", resourceName.String())
    err = fmt.Errorf("update of outer ResourceQuota items has failed: %s", err.Error())
  }

  return err
}
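Editor's note: the Tenant-scoped branch above sums .Status.Used across every namespaced ResourceQuota and compares the total with Quantity.Cmp, where a negative result means under quota and zero or positive means the budget is consumed. A minimal sketch of just that accounting step (tenantOverQuota is an illustrative name):

package sketch

import (
  corev1 "k8s.io/api/core/v1"
  "k8s.io/apimachinery/pkg/api/resource"
)

// tenantOverQuota reports whether the summed usage of one resource across
// all of a Tenant's ResourceQuota objects has reached the hard limit.
func tenantOverQuota(name corev1.ResourceName, hard resource.Quantity, quotas []corev1.ResourceQuota) bool {
  var used resource.Quantity

  for _, rq := range quotas {
    // Indexing a missing key yields a zero Quantity, so absent usage costs nothing.
    used.Add(rq.Status.Used[name])
  }

  // Cmp >= 0 covers both the exact-match fallthrough and the OverQuota case above.
  return used.Cmp(hard) >= 0
}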
179
controllers/tenant/rolebindings.go
Normal file
@@ -0,0 +1,179 @@
package tenant

import (
  "context"
  "fmt"
  "hash/fnv"
  "strings"

  "golang.org/x/sync/errgroup"
  rbacv1 "k8s.io/api/rbac/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  "k8s.io/apimachinery/pkg/types"
  "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

  capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
  "github.com/clastix/capsule/controllers/rbac"
)

// Additional Role Bindings can be used in many ways: applying Pod Security Policies or giving
// access to CRDs or specific API groups.
func (r *Manager) syncAdditionalRoleBindings(tenant *capsulev1beta1.Tenant) (err error) {
  // hashing the RoleBinding name due to DNS RFC-1123 applied to Kubernetes labels
  hashFn := func(binding capsulev1beta1.AdditionalRoleBindingsSpec) string {
    h := fnv.New64a()

    _, _ = h.Write([]byte(binding.ClusterRoleName))

    for _, sub := range binding.Subjects {
      _, _ = h.Write([]byte(sub.Kind + sub.Name))
    }

    return fmt.Sprintf("%x", h.Sum64())
  }
  // getting requested Role Binding keys
  var keys []string
  for _, i := range tenant.Spec.AdditionalRoleBindings {
    keys = append(keys, hashFn(i))
  }

  group := new(errgroup.Group)

  for _, ns := range tenant.Status.Namespaces {
    namespace := ns

    group.Go(func() error {
      return r.syncAdditionalRoleBinding(tenant, namespace, keys, hashFn)
    })
  }

  return group.Wait()
}

func (r *Manager) syncAdditionalRoleBinding(tenant *capsulev1beta1.Tenant, ns string, keys []string, hashFn func(binding capsulev1beta1.AdditionalRoleBindingsSpec) string) (err error) {
  var tenantLabel, roleBindingLabel string

  if tenantLabel, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{}); err != nil {
    return
  }

  if roleBindingLabel, err = capsulev1beta1.GetTypeLabel(&rbacv1.RoleBinding{}); err != nil {
    return
  }

  if err = r.pruningResources(ns, keys, &rbacv1.RoleBinding{}); err != nil {
    return
  }

  for i, roleBinding := range tenant.Spec.AdditionalRoleBindings {
    roleBindingHashLabel := hashFn(roleBinding)

    target := &rbacv1.RoleBinding{
      ObjectMeta: metav1.ObjectMeta{
        Name: fmt.Sprintf("capsule-%s-%d-%s", tenant.Name, i, roleBinding.ClusterRoleName),
        Namespace: ns,
      },
    }

    var res controllerutil.OperationResult
    res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() error {
      target.ObjectMeta.Labels = map[string]string{
        tenantLabel: tenant.Name,
        roleBindingLabel: roleBindingHashLabel,
      }
      target.RoleRef = rbacv1.RoleRef{
        APIGroup: "rbac.authorization.k8s.io",
        Kind: "ClusterRole",
        Name: roleBinding.ClusterRoleName,
      }
      target.Subjects = roleBinding.Subjects

      return controllerutil.SetControllerReference(tenant, target, r.Scheme)
    })

    r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring additional RoleBinding %s", target.GetName()), err)

    if err != nil {
      r.Log.Error(err, "Cannot sync Additional RoleBinding")
    }
    r.Log.Info(fmt.Sprintf("Additional RoleBindings sync result: %s", string(res)), "name", target.Name, "namespace", target.Namespace)
    if err != nil {
      return
    }
  }

  return nil
}

// Each Tenant owner needs the admin Role attached to each Namespace, otherwise no actions on it can be performed.
// Since RBAC is based on deny all first, some specific actions like editing Capsule resources are going to be blocked
// via Dynamic Admission Webhooks.
// TODO(prometherion): we could create a capsule:admin role rather than hitting webhooks for each action
func (r *Manager) ownerRoleBinding(tenant *capsulev1beta1.Tenant) error {
  // getting RoleBinding label for the mutateFn
  var subjects []rbacv1.Subject

  tl, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
  if err != nil {
    return err
  }

  newLabels := map[string]string{tl: tenant.Name}

  for _, owner := range tenant.Spec.Owners {
    if owner.Kind == "ServiceAccount" {
      splitName := strings.Split(owner.Name, ":")
      subjects = append(subjects, rbacv1.Subject{
        Kind: owner.Kind.String(),
        Name: splitName[len(splitName)-1],
        Namespace: splitName[len(splitName)-2],
      })
    } else {
      subjects = append(subjects, rbacv1.Subject{
        APIGroup: "rbac.authorization.k8s.io",
        Kind: owner.Kind.String(),
        Name: owner.Name,
      })
    }
  }

  list := make(map[types.NamespacedName]rbacv1.RoleRef)

  for _, i := range tenant.Status.Namespaces {
    list[types.NamespacedName{Namespace: i, Name: "namespace:admin"}] = rbacv1.RoleRef{
      APIGroup: "rbac.authorization.k8s.io",
      Kind: "ClusterRole",
      Name: "admin",
    }
    list[types.NamespacedName{Namespace: i, Name: "namespace-deleter"}] = rbacv1.RoleRef{
      APIGroup: "rbac.authorization.k8s.io",
      Kind: "ClusterRole",
      Name: rbac.DeleterRoleName,
    }
  }

  for namespacedName, roleRef := range list {
    target := &rbacv1.RoleBinding{
      ObjectMeta: metav1.ObjectMeta{
        Name: namespacedName.Name,
        Namespace: namespacedName.Namespace,
      },
    }

    var res controllerutil.OperationResult
    res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
      target.ObjectMeta.Labels = newLabels
      target.Subjects = subjects
      target.RoleRef = roleRef
      return controllerutil.SetControllerReference(tenant, target, r.Scheme)
    })

    r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring Capsule RoleBinding %s", target.GetName()), err)

    r.Log.Info("Role Binding sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
    if err != nil {
      return err
    }
  }
  return nil
}
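Editor's note: syncAdditionalRoleBindings hashes each binding with FNV-1a because the raw ClusterRole name plus subjects would not reliably fit the 63-character limit on Kubernetes label values. A standalone sketch of that hashing trick (bindingHash is an illustrative name):

package sketch

import (
  "fmt"
  "hash/fnv"
)

// bindingHash folds a ClusterRole name and its subjects into at most
// 16 hex characters, a value that is always label-safe.
func bindingHash(clusterRoleName string, subjects []string) string {
  h := fnv.New64a()

  _, _ = h.Write([]byte(clusterRoleName))
  for _, subject := range subjects {
    _, _ = h.Write([]byte(subject))
  }

  return fmt.Sprintf("%x", h.Sum64())
}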
63
controllers/tenant/utils.go
Normal file
@@ -0,0 +1,63 @@
package tenant

import (
  "context"

  corev1 "k8s.io/api/core/v1"
  "k8s.io/apimachinery/pkg/labels"
  "k8s.io/apimachinery/pkg/runtime"
  "k8s.io/apimachinery/pkg/selection"
  "k8s.io/client-go/util/retry"
  "sigs.k8s.io/controller-runtime/pkg/client"
  "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

  capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

// pruningResources takes care of removing the no-longer-requested sub-resources, such as LimitRange, ResourceQuota or
// NetworkPolicy, using the "exists" and "notin" LabelSelector to perform an outer-join removal.
func (r *Manager) pruningResources(ns string, keys []string, obj client.Object) (err error) {
  var capsuleLabel string
  if capsuleLabel, err = capsulev1beta1.GetTypeLabel(obj); err != nil {
    return
  }

  selector := labels.NewSelector()

  var exists *labels.Requirement
  if exists, err = labels.NewRequirement(capsuleLabel, selection.Exists, []string{}); err != nil {
    return
  }
  selector = selector.Add(*exists)

  if len(keys) > 0 {
    var notIn *labels.Requirement
    if notIn, err = labels.NewRequirement(capsuleLabel, selection.NotIn, keys); err != nil {
      return err
    }

    selector = selector.Add(*notIn)
  }

  r.Log.Info("Pruning objects with label selector " + selector.String())

  return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
    return r.DeleteAllOf(context.TODO(), obj, &client.DeleteAllOfOptions{
      ListOptions: client.ListOptions{
        LabelSelector: selector,
        Namespace: ns,
      },
      DeleteOptions: client.DeleteOptions{},
    })
  })
}

func (r *Manager) emitEvent(object runtime.Object, namespace string, res controllerutil.OperationResult, msg string, err error) {
  var eventType = corev1.EventTypeNormal
  if err != nil {
    eventType = corev1.EventTypeWarning
    res = "Error"
  }

  r.Recorder.AnnotatedEventf(object, map[string]string{"OperationResult": string(res)}, eventType, namespace, msg)
}
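Editor's note: pruningResources deletes objects that still carry the Capsule type label but whose index is no longer among the requested keys, an "exists AND notin" selector. A minimal sketch of building that selector on its own; the rendered form would look roughly like `capsule.clastix.io/limit-range,capsule.clastix.io/limit-range notin (0,1)`:

package sketch

import (
  "k8s.io/apimachinery/pkg/labels"
  "k8s.io/apimachinery/pkg/selection"
)

// pruneSelector combines an Exists requirement with an optional NotIn
// requirement so a DeleteAllOf call removes only stale, previously-managed
// objects while leaving the still-requested indexes untouched.
func pruneSelector(label string, keys []string) (labels.Selector, error) {
  selector := labels.NewSelector()

  exists, err := labels.NewRequirement(label, selection.Exists, nil)
  if err != nil {
    return nil, err
  }
  selector = selector.Add(*exists)

  if len(keys) > 0 {
    notIn, err := labels.NewRequirement(label, selection.NotIn, keys)
    if err != nil {
      return nil, err
    }
    selector = selector.Add(*notIn)
  }

  return selector, nil
}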
@@ -1,739 +0,0 @@
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"golang.org/x/sync/errgroup"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/selection"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/retry"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
"github.com/clastix/capsule/controllers/rbac"
|
||||
)
|
||||
|
||||
// TenantReconciler reconciles a Tenant object
|
||||
type TenantReconciler struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Scheme *runtime.Scheme
|
||||
Recorder record.EventRecorder
|
||||
}
|
||||
|
||||
func (r *TenantReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&capsulev1beta1.Tenant{}).
|
||||
Owns(&corev1.Namespace{}).
|
||||
Owns(&networkingv1.NetworkPolicy{}).
|
||||
Owns(&corev1.LimitRange{}).
|
||||
Owns(&corev1.ResourceQuota{}).
|
||||
Owns(&rbacv1.RoleBinding{}).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r TenantReconciler) Reconcile(ctx context.Context, request ctrl.Request) (result ctrl.Result, err error) {
|
||||
r.Log = r.Log.WithValues("Request.Name", request.Name)
|
||||
|
||||
// Fetch the Tenant instance
|
||||
instance := &capsulev1beta1.Tenant{}
|
||||
err = r.Get(ctx, request.NamespacedName, instance)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
r.Log.Info("Request object not found, could have been deleted after reconcile request")
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
r.Log.Error(err, "Error reading the object")
|
||||
return
|
||||
}
|
||||
// Ensuring the Tenant Status
|
||||
if err = r.updateTenantStatus(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot update Tenant status")
|
||||
return
|
||||
}
|
||||
|
||||
// Ensuring all namespaces are collected
|
||||
r.Log.Info("Ensuring all Namespaces are collected")
|
||||
if err = r.collectNamespaces(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot collect Namespace resources")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Starting processing of Namespaces", "items", len(instance.Status.Namespaces))
|
||||
if err = r.syncNamespaces(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespace items")
|
||||
return
|
||||
}
|
||||
|
||||
if instance.Spec.NetworkPolicies != nil {
|
||||
r.Log.Info("Starting processing of Network Policies", "items", len(instance.Spec.NetworkPolicies.Items))
|
||||
if err = r.syncNetworkPolicies(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync NetworkPolicy items")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if instance.Spec.LimitRanges != nil {
|
||||
r.Log.Info("Starting processing of Limit Ranges", "items", len(instance.Spec.LimitRanges.Items))
|
||||
if err = r.syncLimitRanges(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync LimitRange items")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if instance.Spec.ResourceQuota != nil {
|
||||
r.Log.Info("Starting processing of Resource Quotas", "items", len(instance.Spec.ResourceQuota.Items))
|
||||
if err = r.syncResourceQuotas(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync ResourceQuota items")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring additional RoleBindings for owner")
|
||||
if err = r.syncAdditionalRoleBindings(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync additional RoleBindings items")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring RoleBinding for owner")
|
||||
if err = r.ownerRoleBinding(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync owner RoleBinding")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring Namespace count")
|
||||
if err = r.ensureNamespaceCount(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespace count")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Tenant reconciling completed")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// pruningResources is taking care of removing the no more requested sub-resources as LimitRange, ResourceQuota or
|
||||
// NetworkPolicy using the "exists" and "notin" LabelSelector to perform an outer-join removal.
|
||||
func (r *TenantReconciler) pruningResources(ns string, keys []string, obj client.Object) error {
|
||||
capsuleLabel, err := capsulev1beta1.GetTypeLabel(obj)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := labels.NewSelector()
|
||||
|
||||
exists, err := labels.NewRequirement(capsuleLabel, selection.Exists, []string{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s = s.Add(*exists)
|
||||
|
||||
if len(keys) > 0 {
|
||||
var notIn *labels.Requirement
|
||||
notIn, err = labels.NewRequirement(capsuleLabel, selection.NotIn, keys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s = s.Add(*notIn)
|
||||
}
|
||||
|
||||
r.Log.Info("Pruning objects with label selector " + s.String())
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
return r.DeleteAllOf(context.TODO(), obj, &client.DeleteAllOfOptions{
|
||||
ListOptions: client.ListOptions{
|
||||
LabelSelector: s,
|
||||
Namespace: ns,
|
||||
},
|
||||
DeleteOptions: client.DeleteOptions{},
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// Serial ResourceQuota processing is expensive: using Go routines we can speed it up.
|
||||
// In case of multiple errors these are logged properly, returning a generic error since we have to repush back the
|
||||
// reconciliation loop.
|
||||
func (r *TenantReconciler) resourceQuotasUpdate(resourceName corev1.ResourceName, actual, limit resource.Quantity, list ...corev1.ResourceQuota) error {
|
||||
g := errgroup.Group{}
|
||||
|
||||
for _, item := range list {
|
||||
rq := item
|
||||
g.Go(func() error {
|
||||
found := &corev1.ResourceQuota{}
|
||||
if err := r.Get(context.TODO(), types.NamespacedName{Namespace: rq.Namespace, Name: rq.Name}, found); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
_, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, found, func() error {
|
||||
// Ensuring annotation map is there to avoid uninitialized map error and
|
||||
// assigning the overall usage
|
||||
if found.Annotations == nil {
|
||||
found.Annotations = make(map[string]string)
|
||||
}
|
||||
found.Labels = rq.Labels
|
||||
found.Annotations[capsulev1beta1.UsedQuotaFor(resourceName)] = actual.String()
|
||||
found.Annotations[capsulev1beta1.HardQuotaFor(resourceName)] = limit.String()
|
||||
// Updating the Resource according to the actual.Cmp result
|
||||
found.Spec.Hard = rq.Spec.Hard
|
||||
return nil
|
||||
})
|
||||
return err
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
var err error
|
||||
if err = g.Wait(); err != nil {
|
||||
// We had an error and we mark the whole transaction as failed
|
||||
// to process it another time according to the Tenant controller back-off factor.
|
||||
r.Log.Error(err, "Cannot update outer ResourceQuotas", "resourceName", resourceName.String())
|
||||
err = fmt.Errorf("update of outer ResourceQuota items has failed: %s", err.Error())
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Additional Role Bindings can be used in many ways: applying Pod Security Policies or giving
|
||||
// access to CRDs or specific API groups.
|
||||
func (r *TenantReconciler) syncAdditionalRoleBindings(tenant *capsulev1beta1.Tenant) (err error) {
|
||||
// hashing the RoleBinding name due to DNS RFC-1123 applied to Kubernetes labels
|
||||
hash := func(binding capsulev1beta1.AdditionalRoleBindingsSpec) string {
|
||||
h := fnv.New64a()
|
||||
|
||||
_, _ = h.Write([]byte(binding.ClusterRoleName))
|
||||
|
||||
for _, sub := range binding.Subjects {
|
||||
_, _ = h.Write([]byte(sub.Kind + sub.Name))
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%x", h.Sum64())
|
||||
}
|
||||
// getting requested Role Binding keys
|
||||
var keys []string
|
||||
for _, i := range tenant.Spec.AdditionalRoleBindings {
|
||||
keys = append(keys, hash(i))
|
||||
}
|
||||
|
||||
var tl, ll string
|
||||
tl, err = capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
ll, err = capsulev1beta1.GetTypeLabel(&rbacv1.RoleBinding{})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
if err = r.pruningResources(ns, keys, &rbacv1.RoleBinding{}); err != nil {
|
||||
return err
|
||||
}
|
||||
for i, roleBinding := range tenant.Spec.AdditionalRoleBindings {
|
||||
lv := hash(roleBinding)
|
||||
rb := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%d-%s", tenant.Name, i, roleBinding.ClusterRoleName),
|
||||
Namespace: ns,
|
||||
},
|
||||
}
|
||||
var res controllerutil.OperationResult
|
||||
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, rb, func() error {
|
||||
rb.ObjectMeta.Labels = map[string]string{
|
||||
tl: tenant.Name,
|
||||
ll: lv,
|
||||
}
|
||||
rb.RoleRef = rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: roleBinding.ClusterRoleName,
|
||||
}
|
||||
rb.Subjects = roleBinding.Subjects
|
||||
|
||||
return controllerutil.SetControllerReference(tenant, rb, r.Scheme)
|
||||
})
|
||||
|
||||
r.emitEvent(tenant, rb.GetNamespace(), res, fmt.Sprintf("Ensuring additional RoleBinding %s", rb.GetName()), err)
|
||||
|
||||
if err != nil {
|
||||
r.Log.Error(err, "Cannot sync Additional RoleBinding")
|
||||
}
|
||||
r.Log.Info(fmt.Sprintf("Additional RoleBindings sync result: %s", string(res)), "name", rb.Name, "namespace", rb.Namespace)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// We're relying on the ResourceQuota resource to represent the resource quota for the single Tenant rather than the
|
||||
// single Namespace, so abusing of this API although its Namespaced scope.
|
||||
// Since a Namespace could take-up all the available resource quota, the Namespace ResourceQuota will be a 1:1 mapping
|
||||
// to the Tenant one: in a second time Capsule is going to sum all the analogous ResourceQuota resources on other Tenant
|
||||
// namespaces to check if the Tenant quota has been exceeded or not, reusing the native Kubernetes policy putting the
|
||||
// .Status.Used value as the .Hard value.
|
||||
// This will trigger a following reconciliation but that's ok: the mutateFn will re-use the same business logic, letting
|
||||
// the mutateFn along with the CreateOrUpdate to don't perform the update since resources are identical.
|
||||
func (r *TenantReconciler) syncResourceQuotas(tenant *capsulev1beta1.Tenant) error {
    // getting requested ResourceQuota keys
    keys := make([]string, 0, len(tenant.Spec.ResourceQuota.Items))
    for i := range tenant.Spec.ResourceQuota.Items {
        keys = append(keys, strconv.Itoa(i))
    }

    // getting ResourceQuota labels for the mutateFn
    tenantLabel, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
    if err != nil {
        return err
    }
    typeLabel, err := capsulev1beta1.GetTypeLabel(&corev1.ResourceQuota{})
    if err != nil {
        return err
    }

    for _, ns := range tenant.Status.Namespaces {
        if err := r.pruningResources(ns, keys, &corev1.ResourceQuota{}); err != nil {
            return err
        }
        for i, q := range tenant.Spec.ResourceQuota.Items {
            target := &corev1.ResourceQuota{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      fmt.Sprintf("capsule-%s-%d", tenant.Name, i),
                    Namespace: ns,
                },
            }
            res, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
                target.SetLabels(map[string]string{
                    tenantLabel: tenant.Name,
                    typeLabel:   strconv.Itoa(i),
                })
                // Requirement to list the ResourceQuota of the current Tenant
                tr, err := labels.NewRequirement(tenantLabel, selection.Equals, []string{tenant.Name})
                if err != nil {
                    r.Log.Error(err, "Cannot build ResourceQuota Tenant requirement")
                }
                // Requirement to list the ResourceQuota for the current index
                ir, err := labels.NewRequirement(typeLabel, selection.Equals, []string{strconv.Itoa(i)})
                if err != nil {
                    r.Log.Error(err, "Cannot build ResourceQuota index requirement")
                }

                // Listing all the ResourceQuota according to the said requirements:
                // these are required since Capsule sums all the used quotas to
                // compute the Tenant-wide one.
                rql := &corev1.ResourceQuotaList{}
                err = r.List(context.TODO(), rql, &client.ListOptions{
                    LabelSelector: labels.NewSelector().Add(*tr).Add(*ir),
                })
                if err != nil {
                    r.Log.Error(err, "Cannot list ResourceQuota", "tenantFilter", tr.String(), "indexFilter", ir.String())
                    return err
                }

                // Iterating over all the options declared for the ResourceQuota,
                // summing the used quota across the different Namespaces to determine
                // whether we're hitting a Hard quota at the Tenant level.
                // In that case, we block the Quota by setting the Hard value to the
                // used one.
                for rn, rq := range q.Hard {
                    r.Log.Info("Desired hard " + rn.String() + " quota is " + rq.String())

                    // Getting the whole usage across all the Tenant Namespaces
                    var qt resource.Quantity
                    for _, rq := range rql.Items {
                        qt.Add(rq.Status.Used[rn])
                    }
                    r.Log.Info("Computed " + rn.String() + " quota for the whole Tenant is " + qt.String())

                    switch qt.Cmp(q.Hard[rn]) {
                    case 0:
                        // The Tenant is matching exactly the Quota:
                        // falling through to the next case since we have to block
                        // further resource allocations.
                        fallthrough
                    case 1:
                        // The Tenant is over quota:
                        // updating all the related ResourceQuota with the currently
                        // used Quota to block further creations.
                        for i := range rql.Items {
                            if _, ok := rql.Items[i].Status.Used[rn]; ok {
                                rql.Items[i].Spec.Hard[rn] = rql.Items[i].Status.Used[rn]
                            } else {
                                um := make(map[corev1.ResourceName]resource.Quantity)
                                um[rn] = resource.Quantity{}
                                rql.Items[i].Spec.Hard = um
                            }
                        }
                    default:
                        // The Tenant is respecting the Hard quota:
                        // restoring the default value for all the elements,
                        // including the reconciled one.
                        for i := range rql.Items {
                            if rql.Items[i].Spec.Hard == nil {
                                rql.Items[i].Spec.Hard = map[corev1.ResourceName]resource.Quantity{}
                            }
                            rql.Items[i].Spec.Hard[rn] = q.Hard[rn]
                        }
                        target.Spec = q
                    }
                    if err := r.resourceQuotasUpdate(rn, qt, q.Hard[rn], rql.Items...); err != nil {
                        r.Log.Error(err, "cannot proceed with outer ResourceQuota")
                        return err
                    }
                }
                return controllerutil.SetControllerReference(tenant, target, r.Scheme)
            })

            r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring ResourceQuota %s", target.GetName()), err)

            r.Log.Info("Resource Quota sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
            if err != nil {
                return err
            }
        }
    }

    return nil
}

// Ensuring all the LimitRanges are applied to each Namespace handled by the Tenant.
func (r *TenantReconciler) syncLimitRanges(tenant *capsulev1beta1.Tenant) error {
    // getting requested LimitRange keys
    keys := make([]string, 0, len(tenant.Spec.LimitRanges.Items))
    for i := range tenant.Spec.LimitRanges.Items {
        keys = append(keys, strconv.Itoa(i))
    }

    // getting LimitRange labels for the mutateFn
    tl, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
    if err != nil {
        return err
    }
    ll, err := capsulev1beta1.GetTypeLabel(&corev1.LimitRange{})
    if err != nil {
        return err
    }

    for _, ns := range tenant.Status.Namespaces {
        if err := r.pruningResources(ns, keys, &corev1.LimitRange{}); err != nil {
            return err
        }
        for i, spec := range tenant.Spec.LimitRanges.Items {
            t := &corev1.LimitRange{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      fmt.Sprintf("capsule-%s-%d", tenant.Name, i),
                    Namespace: ns,
                },
            }
            res, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, t, func() (err error) {
                t.ObjectMeta.Labels = map[string]string{
                    tl: tenant.Name,
                    ll: strconv.Itoa(i),
                }
                t.Spec = spec
                return controllerutil.SetControllerReference(tenant, t, r.Scheme)
            })

            r.emitEvent(tenant, t.GetNamespace(), res, fmt.Sprintf("Ensuring LimitRange %s", t.GetName()), err)

            r.Log.Info("LimitRange sync result: "+string(res), "name", t.Name, "namespace", t.Namespace)
            if err != nil {
                return err
            }
        }
    }

    return nil
}

func (r *TenantReconciler) syncNamespaceMetadata(namespace string, tnt *capsulev1beta1.Tenant) (err error) {
    var res controllerutil.OperationResult

    err = retry.RetryOnConflict(retry.DefaultBackoff, func() (conflictErr error) {
        ns := &corev1.Namespace{}
        if conflictErr = r.Client.Get(context.TODO(), types.NamespacedName{Name: namespace}, ns); conflictErr != nil {
            return
        }

        res, conflictErr = controllerutil.CreateOrUpdate(context.TODO(), r.Client, ns, func() error {
            a := make(map[string]string)

            if tnt.Spec.NamespacesMetadata != nil {
                for k, v := range tnt.Spec.NamespacesMetadata.AdditionalAnnotations {
                    a[k] = v
                }
            }

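            // Illustrative note (an assumption, not from the source): a Tenant nodeSelector such as
            // {"pool": "oil"} is serialized below into the Namespace annotation
            // "scheduler.alpha.kubernetes.io/node-selector: pool=oil", which the PodNodeSelector
            // admission plugin enforces on every Pod scheduled in the Namespace.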
            if tnt.Spec.NodeSelector != nil {
                var selector []string
                for k, v := range tnt.Spec.NodeSelector {
                    selector = append(selector, fmt.Sprintf("%s=%s", k, v))
                }
                a["scheduler.alpha.kubernetes.io/node-selector"] = strings.Join(selector, ",")
            }

            if tnt.Spec.IngressClasses != nil {
                if len(tnt.Spec.IngressClasses.Exact) > 0 {
                    a[capsulev1beta1.AvailableIngressClassesAnnotation] = strings.Join(tnt.Spec.IngressClasses.Exact, ",")
                }
                if len(tnt.Spec.IngressClasses.Regex) > 0 {
                    a[capsulev1beta1.AvailableIngressClassesRegexpAnnotation] = tnt.Spec.IngressClasses.Regex
                }
            }

            if tnt.Spec.StorageClasses != nil {
                if len(tnt.Spec.StorageClasses.Exact) > 0 {
                    a[capsulev1beta1.AvailableStorageClassesAnnotation] = strings.Join(tnt.Spec.StorageClasses.Exact, ",")
                }
                if len(tnt.Spec.StorageClasses.Regex) > 0 {
                    a[capsulev1beta1.AvailableStorageClassesRegexpAnnotation] = tnt.Spec.StorageClasses.Regex
                }
            }

            if tnt.Spec.ContainerRegistries != nil {
                if len(tnt.Spec.ContainerRegistries.Exact) > 0 {
                    a[capsulev1beta1.AllowedRegistriesAnnotation] = strings.Join(tnt.Spec.ContainerRegistries.Exact, ",")
                }
                if len(tnt.Spec.ContainerRegistries.Regex) > 0 {
                    a[capsulev1beta1.AllowedRegistriesRegexpAnnotation] = tnt.Spec.ContainerRegistries.Regex
                }
            }

            ns.SetAnnotations(a)

            l := make(map[string]string)

            if tnt.Spec.NamespacesMetadata != nil {
                for k, v := range tnt.Spec.NamespacesMetadata.AdditionalLabels {
                    l[k] = v
                }
            }

            l["name"] = namespace
            capsuleLabel, _ := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
            l[capsuleLabel] = tnt.GetName()
            ns.SetLabels(l)

            return nil
        })

        return
    })

    r.emitEvent(tnt, namespace, res, "Ensuring Namespace metadata", err)

    return
}

// Ensuring all annotations are applied to each Namespace handled by the Tenant.
func (r *TenantReconciler) syncNamespaces(tenant *capsulev1beta1.Tenant) (err error) {
    group := errgroup.Group{}

    for _, item := range tenant.Status.Namespaces {
        namespace := item
        group.Go(func() error {
            return r.syncNamespaceMetadata(namespace, tenant)
        })
    }

    if err = group.Wait(); err != nil {
        r.Log.Error(err, "Cannot sync Namespaces")
        err = fmt.Errorf("cannot sync Namespaces: %s", err.Error())
    }
    return
}

// Ensuring all the NetworkPolicies are applied to each Namespace handled by the Tenant.
func (r *TenantReconciler) syncNetworkPolicies(tenant *capsulev1beta1.Tenant) error {
    // getting requested NetworkPolicy keys
    keys := make([]string, 0, len(tenant.Spec.NetworkPolicies.Items))
    for i := range tenant.Spec.NetworkPolicies.Items {
        keys = append(keys, strconv.Itoa(i))
    }

    // getting NetworkPolicy labels for the mutateFn
    tl, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
    if err != nil {
        return err
    }
    nl, err := capsulev1beta1.GetTypeLabel(&networkingv1.NetworkPolicy{})
    if err != nil {
        return err
    }

    for _, ns := range tenant.Status.Namespaces {
        if err := r.pruningResources(ns, keys, &networkingv1.NetworkPolicy{}); err != nil {
            return err
        }
        for i, spec := range tenant.Spec.NetworkPolicies.Items {
            t := &networkingv1.NetworkPolicy{
                ObjectMeta: metav1.ObjectMeta{
                    Name:      fmt.Sprintf("capsule-%s-%d", tenant.Name, i),
                    Namespace: ns,
                },
            }
            res, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, t, func() (err error) {
                t.SetLabels(map[string]string{
                    tl: tenant.Name,
                    nl: strconv.Itoa(i),
                })
                t.Spec = spec

                return controllerutil.SetControllerReference(tenant, t, r.Scheme)
            })

            r.emitEvent(tenant, t.GetNamespace(), res, fmt.Sprintf("Ensuring NetworkPolicy %s", t.GetName()), err)

            r.Log.Info("Network Policy sync result: "+string(res), "name", t.Name, "namespace", t.Namespace)
            if err != nil {
                return err
            }
        }
    }

    return nil
}

// Each Tenant owner needs the admin Role attached to each Namespace, otherwise no actions on it can be performed.
// Since RBAC is deny-by-default, some specific actions, like editing Capsule resources, are blocked
// via Dynamic Admission Webhooks.
// TODO(prometherion): we could create a capsule:admin role rather than hitting webhooks for each action
func (r *TenantReconciler) ownerRoleBinding(tenant *capsulev1beta1.Tenant) error {
    // getting the RoleBinding label for the mutateFn
    var subjects []rbacv1.Subject

    tl, err := capsulev1beta1.GetTypeLabel(&capsulev1beta1.Tenant{})
    if err != nil {
        return err
    }

    l := map[string]string{tl: tenant.Name}

    for _, owner := range tenant.Spec.Owners {
        if owner.Kind == "ServiceAccount" {
            splitName := strings.Split(owner.Name, ":")
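            // Illustrative note: a ServiceAccount owner is conventionally named
            // "system:serviceaccount:<namespace>:<name>", so after splitting on ":" the last
            // element is the ServiceAccount name and the one before it is its Namespace.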
            subjects = append(subjects, rbacv1.Subject{
                Kind:      owner.Kind.String(),
                Name:      splitName[len(splitName)-1],
                Namespace: splitName[len(splitName)-2],
            })
        } else {
            subjects = append(subjects, rbacv1.Subject{
                APIGroup: "rbac.authorization.k8s.io",
                Kind:     owner.Kind.String(),
                Name:     owner.Name,
            })
        }
    }

    rbl := make(map[types.NamespacedName]rbacv1.RoleRef)
    for _, i := range tenant.Status.Namespaces {
        rbl[types.NamespacedName{Namespace: i, Name: "namespace:admin"}] = rbacv1.RoleRef{
            APIGroup: "rbac.authorization.k8s.io",
            Kind:     "ClusterRole",
            Name:     "admin",
        }
        rbl[types.NamespacedName{Namespace: i, Name: "namespace-deleter"}] = rbacv1.RoleRef{
            APIGroup: "rbac.authorization.k8s.io",
            Kind:     "ClusterRole",
            Name:     rbac.DeleterRoleName,
        }
    }

    for nn, rr := range rbl {
        target := &rbacv1.RoleBinding{
            ObjectMeta: metav1.ObjectMeta{
                Name:      nn.Name,
                Namespace: nn.Namespace,
            },
        }

        var res controllerutil.OperationResult
        res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, target, func() (err error) {
            target.ObjectMeta.Labels = l
            target.Subjects = subjects
            target.RoleRef = rr
            return controllerutil.SetControllerReference(tenant, target, r.Scheme)
        })

        r.emitEvent(tenant, target.GetNamespace(), res, fmt.Sprintf("Ensuring Capsule RoleBinding %s", target.GetName()), err)

        r.Log.Info("Role Binding sync result: "+string(res), "name", target.Name, "namespace", target.Namespace)
        if err != nil {
            return err
        }
    }
    return nil
}

func (r *TenantReconciler) ensureNamespaceCount(tenant *capsulev1beta1.Tenant) error {
    return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
        tenant.Status.Size = uint(len(tenant.Status.Namespaces))
        found := &capsulev1beta1.Tenant{}
        if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: tenant.GetName()}, found); err != nil {
            return err
        }
        found.Status.Size = tenant.Status.Size
        return r.Client.Status().Update(context.TODO(), found, &client.UpdateOptions{})
    })
}

func (r *TenantReconciler) emitEvent(object runtime.Object, namespace string, res controllerutil.OperationResult, msg string, err error) {
    var eventType = corev1.EventTypeNormal
    if err != nil {
        eventType = corev1.EventTypeWarning
        res = "Error"
    }

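    // Note: client-go's AnnotatedEventf signature is (object, annotations, eventtype, reason,
    // messageFmt, args...), so the Namespace name serves as the Event reason here.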
    r.Recorder.AnnotatedEventf(object, map[string]string{"OperationResult": string(res)}, eventType, namespace, msg)
}

func (r *TenantReconciler) collectNamespaces(tenant *capsulev1beta1.Tenant) error {
    return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
        nl := &corev1.NamespaceList{}
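        // Note (an assumption based on controller-runtime semantics): this field selector works
        // only because a custom field indexer maps each Namespace to its owning Tenant through
        // the ownerReferences; it is not a built-in apiserver field.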
        err = r.Client.List(context.TODO(), nl, client.MatchingFieldsSelector{
            Selector: fields.OneTermEqualSelector(".metadata.ownerReferences[*].capsule", tenant.GetName()),
        })
        if err != nil {
            return
        }
        _, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, tenant.DeepCopy(), func() error {
            tenant.AssignNamespaces(nl.Items)
            return r.Client.Status().Update(context.TODO(), tenant, &client.UpdateOptions{})
        })
        return
    })
}

func (r *TenantReconciler) updateTenantStatus(tnt *capsulev1beta1.Tenant) error {
    return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
        if tnt.IsCordoned() {
            tnt.Status.State = capsulev1beta1.TenantStateCordoned
        } else {
            tnt.Status.State = capsulev1beta1.TenantStateActive
        }

        return r.Client.Status().Update(context.Background(), tnt)
    })
}
@@ -167,9 +167,10 @@ _ValidatingWebhookConfiguration_ too, adding the said `ngrok` URL as base for
each defined webhook, as follows:

```diff
-apiVersion: admissionregistration.k8s.io/v1beta1
+apiVersion: admissionregistration.k8s.io/v1
 kind: MutatingWebhookConfiguration
 metadata:
   name: capsule-mutating-webhook-configuration
 webhooks:
 - name: owner.namespace.capsule.clastix.io
```
@@ -0,0 +1,77 @@
# Allow self-service management of Network Policies

**Profile Applicability:** L2

**Type:** Behavioral

**Category:** Self-Service Operations

**Description:** Tenants should be able to perform self-service operations by creating their own network policies in their namespaces.

**Rationale:** Enables self-service management of network policies.

**Audit:**

As cluster admin, create a tenant

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  networkPolicies:
    items:
    - ingress:
      - from:
        - namespaceSelector:
            matchLabels:
              capsule.clastix.io/tenant: oil
      podSelector: {}
      policyTypes:
      - Egress
      - Ingress
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, retrieve the networkpolicies resources in the tenant namespace

```bash
kubectl --kubeconfig alice get networkpolicies
NAME            POD-SELECTOR   AGE
capsule-oil-0   <none>         7m5s
```

As tenant owner, check for permissions to manage networkpolicies for each verb

```bash
kubectl --kubeconfig alice auth can-i get networkpolicies
kubectl --kubeconfig alice auth can-i create networkpolicies
kubectl --kubeconfig alice auth can-i update networkpolicies
kubectl --kubeconfig alice auth can-i patch networkpolicies
kubectl --kubeconfig alice auth can-i delete networkpolicies
kubectl --kubeconfig alice auth can-i deletecollection networkpolicies
```

Each command must return 'yes'
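The verb checks can also be compressed into a single loop (a sketch, assuming the `alice` kubeconfig from above):

```bash
for verb in get create update patch delete deletecollection; do
  echo -n "$verb networkpolicies: "
  kubectl --kubeconfig alice auth can-i "$verb" networkpolicies
done
```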
**Cleanup:**
As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
```
@@ -0,0 +1,58 @@
# Allow self-service management of Role Bindings

**Profile Applicability:** L2

**Type:** Behavioral

**Category:** Self-Service Operations

**Description:** Tenants should be able to perform self-service operations by creating their own rolebindings in their namespaces.

**Rationale:** Enables self-service management of role bindings.

**Audit:**

As cluster admin, create a tenant

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, check for permissions to manage rolebindings for each verb

```bash
kubectl --kubeconfig alice auth can-i get rolebindings
kubectl --kubeconfig alice auth can-i create rolebindings
kubectl --kubeconfig alice auth can-i update rolebindings
kubectl --kubeconfig alice auth can-i patch rolebindings
kubectl --kubeconfig alice auth can-i delete rolebindings
kubectl --kubeconfig alice auth can-i deletecollection rolebindings
```

Each command must return 'yes'

**Cleanup:**
As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
```
58
docs/operator/mtb/allow-self-service-management-of-roles.md
Normal file
@@ -0,0 +1,58 @@
# Allow self-service management of Roles

**Profile Applicability:** L2

**Type:** Behavioral

**Category:** Self-Service Operations

**Description:** Tenants should be able to perform self-service operations by creating their own roles in their namespaces.

**Rationale:** Enables self-service management of roles.

**Audit:**

As cluster admin, create a tenant

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, check for permissions to manage roles for each verb

```bash
kubectl --kubeconfig alice auth can-i get roles
kubectl --kubeconfig alice auth can-i create roles
kubectl --kubeconfig alice auth can-i update roles
kubectl --kubeconfig alice auth can-i patch roles
kubectl --kubeconfig alice auth can-i delete roles
kubectl --kubeconfig alice auth can-i deletecollection roles
```

Each command must return 'yes'

**Cleanup:**
As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
```
113
docs/operator/mtb/block-access-to-cluster-resources.md
Normal file
@@ -0,0 +1,113 @@
# Block access to cluster resources

**Profile Applicability:** L1

**Type:** Configuration Check

**Category:** Control Plane Isolation

**Description:** Tenants should not be able to view, edit, create, or delete cluster (non-namespaced) resources such as Node, ClusterRole, ClusterRoleBinding, etc.

**Rationale:** Access controls should be configured for tenants so that a tenant cannot list, create, modify, or delete cluster resources

**Audit:**

As cluster admin, create a tenant

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
EOF

./create-user.sh alice oil
```

As cluster admin, run the following command to retrieve the list of non-namespaced resources
```bash
kubectl --kubeconfig cluster-admin api-resources --namespaced=false
```
For all non-namespaced resources, and each verb (get, list, create, update, patch, watch, delete, and deletecollection), issue the following command:

```bash
kubectl --kubeconfig alice auth can-i <verb> <resource>
```
Each command must return `no`
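A minimal loop to automate the full matrix (a sketch, assuming bash and the kubeconfig files created above):

```bash
for resource in $(kubectl --kubeconfig cluster-admin api-resources --namespaced=false -o name); do
  for verb in get list create update patch watch delete deletecollection; do
    # every combination must print "no" for a tenant owner
    echo -n "$verb $resource: "
    kubectl --kubeconfig alice auth can-i "$verb" "$resource"
  done
done
```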

**Exception:**

It should, but it does not:

```bash
kubectl --kubeconfig alice auth can-i create selfsubjectaccessreviews
yes
kubectl --kubeconfig alice auth can-i create selfsubjectrulesreviews
yes
kubectl --kubeconfig alice auth can-i create namespaces
yes
```

Any Kubernetes user can create `SelfSubjectAccessReview` and `SelfSubjectRulesReview` objects to check whether they can perform an action, so the first two exceptions are not an issue.

```bash
kubectl --anyuser auth can-i --list
Resources                                       Non-Resource URLs   Resource Names   Verbs
selfsubjectaccessreviews.authorization.k8s.io   []                  []               [create]
selfsubjectrulesreviews.authorization.k8s.io    []                  []               [create]
                                                [/api/*]            []               [get]
                                                [/api]              []               [get]
                                                [/apis/*]           []               [get]
                                                [/apis]             []               [get]
                                                [/healthz]          []               [get]
                                                [/healthz]          []               [get]
                                                [/livez]            []               [get]
                                                [/livez]            []               [get]
                                                [/openapi/*]        []               [get]
                                                [/openapi]          []               [get]
                                                [/readyz]           []               [get]
                                                [/readyz]           []               [get]
                                                [/version/]         []               [get]
                                                [/version/]         []               [get]
                                                [/version]          []               [get]
                                                [/version]          []               [get]
```

In order to enable namespace self-service provisioning, Capsule intentionally gives permissions to create namespaces to all users belonging to the Capsule group:

```bash
kubectl describe clusterrolebindings capsule-namespace-provisioner
Name:         capsule-namespace-provisioner
Labels:       <none>
Annotations:  <none>
Role:
  Kind:  ClusterRole
  Name:  capsule-namespace-provisioner
Subjects:
  Kind   Name                Namespace
  ----   ----                ---------
  Group  capsule.clastix.io

kubectl describe clusterrole capsule-namespace-provisioner
Name:         capsule-namespace-provisioner
Labels:       <none>
Annotations:  <none>
PolicyRule:
  Resources   Non-Resource URLs   Resource Names   Verbs
  ---------   -----------------   --------------   -----
  namespaces  []                  []               [create]
```

Capsule controls self-service namespace creation by limiting the number of namespaces a user can create via the `tenant.spec.namespaceQuota` option.

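For instance, a sketch of the option named above, capping the tenant at three namespaces:

```yaml
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  namespaceQuota: 3
  owners:
  - kind: User
    name: alice
```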
**Cleanup:**
As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
```
155
docs/operator/mtb/block-access-to-multitenant-resources.md
Normal file
@@ -0,0 +1,155 @@
# Block access to multitenant resources

**Profile Applicability:** L1

**Type:** Behavioral

**Category:** Tenant Isolation

**Description:** Each tenant namespace may contain resources set up by the cluster administrator for multi-tenancy, such as role bindings and network policies. Tenants should not be allowed to modify the namespaced resources created by the cluster administrator for multi-tenancy. However, for some resources such as network policies, tenants can configure additional instances of the resource for their workloads.

**Rationale:** Tenants can escalate privileges and impact other tenants if they are able to delete or modify required multi-tenancy resources, such as namespace resource quotas or the default network policy.

**Audit:**

As cluster admin, create a tenant

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  networkPolicies:
    items:
    - podSelector: {}
      policyTypes:
      - Ingress
      - Egress
    - egress:
      - to:
        - namespaceSelector:
            matchLabels:
              capsule.clastix.io/tenant: oil
      ingress:
      - from:
        - namespaceSelector:
            matchLabels:
              capsule.clastix.io/tenant: oil
      podSelector: {}
      policyTypes:
      - Egress
      - Ingress
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, retrieve the networkpolicies resources in the tenant namespace

```bash
kubectl --kubeconfig alice get networkpolicies
NAME            POD-SELECTOR   AGE
capsule-oil-0   <none>         7m5s
capsule-oil-1   <none>         7m5s
```

As tenant owner, try to modify or delete one of the networkpolicies

```bash
kubectl --kubeconfig alice delete networkpolicies capsule-oil-0
```

You should receive an error message denying the edit/delete request

```bash
Error from server (Forbidden): networkpolicies.networking.k8s.io "capsule-oil-0" is forbidden:
User "oil" cannot delete resource "networkpolicies" in API group "networking.k8s.io" in the namespace "oil-production"
```

As tenant owner, you can create an additional networkpolicy inside the namespace

```yaml
kubectl create -f - << EOF
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: hijacking
  namespace: oil-production
spec:
  egress:
  - to:
    - ipBlock:
        cidr: 0.0.0.0/0
  podSelector: {}
  policyTypes:
  - Egress
EOF
```

However, due to the additive nature of networkpolicies, the `DENY ALL` policy set by the cluster admin prevents the hijacking.

As tenant owner, list the RBAC permissions set by Capsule

```bash
kubectl --kubeconfig alice get rolebindings
NAME                ROLE                                    AGE
namespace-deleter   ClusterRole/capsule-namespace-deleter   11h
namespace:admin     ClusterRole/admin                       11h
```

As tenant owner, try to change/delete the rolebindings in order to escalate permissions

```bash
kubectl --kubeconfig alice edit/delete rolebinding namespace:admin
```

The rolebinding is immediately recreated by Capsule:

```
kubectl --kubeconfig alice get rolebindings
NAME                ROLE                                    AGE
namespace-deleter   ClusterRole/capsule-namespace-deleter   11h
namespace:admin     ClusterRole/admin                       2s
```

However, the tenant owner can create and assign permissions inside the namespaces she owns

```yaml
kubectl create -f - << EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: oil-robot:admin
  namespace: oil-production
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: admin
subjects:
- kind: ServiceAccount
  name: default
  namespace: oil-production
EOF
```

**Cleanup:**
As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
```
97
docs/operator/mtb/block-access-to-other-tenant-resources.md
Normal file
@@ -0,0 +1,97 @@
# Block access to other tenant resources

**Profile Applicability:** L1

**Type:** Behavioral

**Category:** Tenant Isolation

**Description:** Each tenant has its own set of resources, such as namespaces, service accounts, secrets, pods, services, etc. Tenants should not be allowed to access each other's resources.

**Rationale:** A tenant's resources must not be accessible to other tenants.

**Audit:**

As cluster admin, create a couple of tenants

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
EOF

./create-user.sh alice oil
```

and

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: gas
spec:
  owners:
  - kind: User
    name: joe
EOF

./create-user.sh joe gas
```

As `oil` tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As `gas` tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig joe create ns gas-production
kubectl --kubeconfig joe config set-context --current --namespace gas-production
```

As `oil` tenant owner, try to retrieve the resources in the `gas` tenant namespaces

```bash
kubectl --kubeconfig alice get serviceaccounts --namespace gas-production
```

You must receive an error message:

```
Error from server (Forbidden): serviceaccounts is forbidden:
User "oil" cannot list resource "serviceaccounts" in API group "" in the namespace "gas-production"
```

As `gas` tenant owner, try to retrieve the resources in the `oil` tenant namespaces

```bash
kubectl --kubeconfig joe get serviceaccounts --namespace oil-production
```

You must receive an error message:

```
Error from server (Forbidden): serviceaccounts is forbidden:
User "joe" cannot list resource "serviceaccounts" in API group "" in the namespace "oil-production"
```

**Cleanup:**
As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenants oil gas
```
121
docs/operator/mtb/block-add-capabilities.md
Normal file
@@ -0,0 +1,121 @@
# Block add capabilities

**Profile Applicability:** L1

**Type:** Behavioral Check

**Category:** Control Plane Isolation

**Description:** Control Linux capabilities.

**Rationale:** Linux allows defining fine-grained permissions using capabilities. With Kubernetes, it is possible to add capabilities for pods that escalate the level of kernel access and allow other potentially dangerous behaviors.

**Audit:**

As cluster admin, define a `PodSecurityPolicy` with `allowedCapabilities` and map the policy to a tenant:

```yaml
kubectl create -f - << EOF
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: tenant
spec:
  privileged: false
  # Required to prevent escalations to root.
  allowPrivilegeEscalation: false
  # The default set of capabilities is implicitly allowed.
  # The empty set means that no additional capabilities may be added beyond the default set.
  allowedCapabilities: []
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
EOF
```

> Note: make sure `PodSecurityPolicy` Admission Control is enabled on the API server: `--enable-admission-plugins=PodSecurityPolicy`

Then create a ClusterRole granting use of the said policy

```yaml
kubectl create -f - << EOF
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tenant:psp
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  resourceNames: ['tenant']
  verbs: ['use']
EOF
```

And assign it to the tenant

```yaml
kubectl apply -f - << EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  additionalRoleBindings:
  - clusterRoleName: tenant:psp
    subjects:
    - kind: "Group"
      apiGroup: "rbac.authorization.k8s.io"
      name: "system:authenticated"
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, create a pod and verify that new capabilities cannot be added in the tenant namespaces

```yaml
kubectl --kubeconfig alice apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-settime-cap
  namespace: oil-production
spec:
  containers:
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep", "3600"]
    securityContext:
      capabilities:
        add:
        - SYS_TIME
EOF
```

The pod must be blocked by the `PodSecurityPolicy`.

**Cleanup:**
As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
```
69
docs/operator/mtb/block-modification-of-resource-quotas.md
Normal file
@@ -0,0 +1,69 @@
# Block modification of resource quotas

**Profile Applicability:** L1

**Type:** Behavioral Check

**Category:** Tenant Isolation

**Description:** Tenants should not be able to modify the resource quotas defined in their namespaces

**Rationale:** Resource quotas must be configured for isolation and fairness between tenants. Tenants should not be able to modify existing resource quotas as they may exhaust cluster resources and impact other tenants.

**Audit:**

As cluster admin, create a tenant

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  resourceQuotas:
    items:
    - hard:
        limits.cpu: "8"
        limits.memory: 16Gi
        requests.cpu: "8"
        requests.memory: 16Gi
    - hard:
        pods: "10"
        services: "50"
    - hard:
        requests.storage: 100Gi
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, check the permissions to modify/delete the quota in the tenant namespace:

```bash
kubectl --kubeconfig alice auth can-i create quota
kubectl --kubeconfig alice auth can-i update quota
kubectl --kubeconfig alice auth can-i patch quota
kubectl --kubeconfig alice auth can-i delete quota
kubectl --kubeconfig alice auth can-i deletecollection quota
```

Each command must return 'no'
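Read-only access to the quota, by contrast, is expected to keep working since the tenant owner is admin of the namespace (a quick check, assuming the same setup):

```bash
kubectl --kubeconfig alice auth can-i get quota
yes
```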
**Cleanup:**
As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
```
@@ -0,0 +1,107 @@
# Block network access between tenants

**Profile Applicability:** L1

**Type:** Behavioral

**Category:** Tenant Isolation

**Description:** Block network traffic among namespaces from different tenants.

**Rationale:** Tenants cannot access services and pods in another tenant's namespaces.

**Audit:**

As cluster admin, create a couple of tenants

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  networkPolicies:
    items:
    - ingress:
      - from:
        - namespaceSelector:
            matchLabels:
              capsule.clastix.io/tenant: oil
      podSelector: {}
      policyTypes:
      - Ingress
EOF

./create-user.sh alice oil
```

and

```yaml
kubectl create -f - <<EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: gas
spec:
  owners:
  - kind: User
    name: joe
  networkPolicies:
    items:
    - ingress:
      - from:
        - namespaceSelector:
            matchLabels:
              capsule.clastix.io/tenant: gas
      podSelector: {}
      policyTypes:
      - Ingress
EOF

./create-user.sh joe gas
```

As `oil` tenant owner, run the following commands to create a namespace and resources in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
kubectl --kubeconfig alice run webserver --image nginx:latest
kubectl --kubeconfig alice expose pod webserver --port 80
```

As `gas` tenant owner, run the following commands to create a namespace and resources in the given tenant

```bash
kubectl --kubeconfig joe create ns gas-production
kubectl --kubeconfig joe config set-context --current --namespace gas-production
kubectl --kubeconfig joe run webserver --image nginx:latest
kubectl --kubeconfig joe expose pod webserver --port 80
```

As `oil` tenant owner, verify you can access the service in the `oil` tenant namespace but not in the `gas` tenant namespace

```bash
kubectl --kubeconfig alice exec webserver -- curl http://webserver.oil-production.svc.cluster.local
kubectl --kubeconfig alice exec webserver -- curl http://webserver.gas-production.svc.cluster.local
```

Vice versa, as `gas` tenant owner, verify you can access the service in the `gas` tenant namespace but not in the `oil` tenant namespace

```bash
kubectl --kubeconfig joe exec webserver -- curl http://webserver.gas-production.svc.cluster.local
kubectl --kubeconfig joe exec webserver -- curl http://webserver.oil-production.svc.cluster.local
```

**Cleanup:**
As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenants oil gas
```
115
docs/operator/mtb/block-privilege-escalation.md
Normal file
@@ -0,0 +1,115 @@
# Block privilege escalation

**Profile Applicability:** L1

**Type:** Behavioral Check

**Category:** Control Plane Isolation

**Description:** Control container permissions.

**Rationale:** The security `allowPrivilegeEscalation` setting allows a process to gain more privileges than its parent process. Processes in tenant containers should not be allowed to gain additional privileges.

**Audit:**

As cluster admin, define a `PodSecurityPolicy` that sets `allowPrivilegeEscalation=false` and map the policy to a tenant:

```yaml
kubectl create -f - << EOF
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: tenant
spec:
  privileged: false
  # Required to prevent escalations to root.
  allowPrivilegeEscalation: false
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
EOF
```

> Note: make sure `PodSecurityPolicy` Admission Control is enabled on the API server: `--enable-admission-plugins=PodSecurityPolicy`

Then create a ClusterRole granting use of the said policy

```yaml
kubectl create -f - << EOF
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tenant:psp
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  resourceNames: ['tenant']
  verbs: ['use']
EOF
```

And assign it to the tenant

```yaml
kubectl apply -f - << EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  additionalRoleBindings:
  - clusterRoleName: tenant:psp
    subjects:
    - kind: "Group"
      apiGroup: "rbac.authorization.k8s.io"
      name: "system:authenticated"
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, create a pod or container that sets `allowPrivilegeEscalation=true` in its `securityContext`.

```yaml
kubectl --kubeconfig alice apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
  name: pod-privileged-mode
  namespace: oil-production
spec:
  containers:
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep", "3600"]
    securityContext:
      allowPrivilegeEscalation: true
EOF
```

The pod must be blocked by the `PodSecurityPolicy`.

**Cleanup:**
As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
```
116
docs/operator/mtb/block-privileged-containers.md
Normal file
@@ -0,0 +1,116 @@
# Block privileged containers

**Profile Applicability:** L1

**Type:** Behavioral Check

**Category:** Control Plane Isolation

**Description:** Control container permissions.

**Rationale:** By default a container is not allowed to access any devices on the host, but a “privileged” container can access all devices on the host. A process within a privileged container can also get unrestricted host access. Hence, tenants should not be allowed to run privileged containers.

**Audit:**

As cluster admin, define a `PodSecurityPolicy` that sets `privileged=false` and map the policy to a tenant:

```yaml
kubectl create -f - << EOF
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: tenant
spec:
  privileged: false
  # Required to prevent escalations to root.
  allowPrivilegeEscalation: false
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
EOF
```

> Note: make sure `PodSecurityPolicy` Admission Control is enabled on the API server: `--enable-admission-plugins=PodSecurityPolicy`

Then create a ClusterRole granting use of the said policy

```yaml
kubectl create -f - << EOF
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tenant:psp
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  resourceNames: ['tenant']
  verbs: ['use']
EOF
```

And assign it to the tenant

```yaml
kubectl apply -f - << EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  additionalRoleBindings:
  - clusterRoleName: tenant:psp
    subjects:
    - kind: "Group"
      apiGroup: "rbac.authorization.k8s.io"
      name: "system:authenticated"
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, create a pod or container that sets privileges in its `securityContext`.

```yaml
kubectl --kubeconfig alice apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
  name: pod-privileged-mode
  namespace: oil-production
spec:
  containers:
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep", "3600"]
    securityContext:
      privileged: true
EOF
```

The pod must be blocked by the `PodSecurityPolicy`.

**Cleanup:**
As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
```
@@ -0,0 +1,40 @@
# Block use of existing PVs

**Profile Applicability:** L1

**Type:** Configuration Check

**Category:** Data Isolation

**Description:** Prevent tenants from mounting existing (persistent) volumes.

**Rationale:** Tenants have to be assured that their Persistent Volumes cannot be reclaimed by other tenants.

**Audit:**

As cluster admin, create a tenant

```yaml
kubectl create -f - << EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
EOF

./create-user.sh alice oil
```

As tenant owner, check if you can access the persistent volumes

```bash
kubectl --kubeconfig alice auth can-i get persistentvolumes
kubectl --kubeconfig alice auth can-i list persistentvolumes
kubectl --kubeconfig alice auth can-i watch persistentvolumes
```

All the requests must return 'no'.
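Tenants are still expected to request storage through the namespaced PersistentVolumeClaim API instead (a quick check, assuming the command is issued against a namespace owned by the tenant):

```bash
kubectl --kubeconfig alice auth can-i create persistentvolumeclaims
```

This must return 'yes'.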
115
docs/operator/mtb/block-use-of-host-ipc.md
Normal file
@@ -0,0 +1,115 @@
# Block use of host IPC

**Profile Applicability:** L1

**Type:** Behavioral Check

**Category:** Host Isolation

**Description:** Tenants should not be allowed to share the host's inter-process communication (IPC) namespace.

**Rationale:** The `hostIPC` setting allows pods to share the host's inter-process communication (IPC) namespace, allowing potential access to host processes or processes belonging to other tenants.

**Audit:**

As cluster admin, define a `PodSecurityPolicy` that restricts `hostIPC` usage and map the policy to a tenant:

```yaml
kubectl create -f - << EOF
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: tenant
spec:
  privileged: false
  # Required to prevent escalations to root.
  allowPrivilegeEscalation: false
  hostIPC: false
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
EOF
```

> Note: make sure `PodSecurityPolicy` Admission Control is enabled on the API server: `--enable-admission-plugins=PodSecurityPolicy`

Then create a ClusterRole granting use of the said policy

```yaml
kubectl create -f - << EOF
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: tenant:psp
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  resourceNames: ['tenant']
  verbs: ['use']
EOF
```

And assign it to the tenant

```yaml
kubectl apply -f - << EOF
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
  additionalRoleBindings:
  - clusterRoleName: tenant:psp
    subjects:
    - kind: "Group"
      apiGroup: "rbac.authorization.k8s.io"
      name: "system:authenticated"
EOF

./create-user.sh alice oil
```

As tenant owner, run the following command to create a namespace in the given tenant

```bash
kubectl --kubeconfig alice create ns oil-production
kubectl --kubeconfig alice config set-context --current --namespace oil-production
```

As tenant owner, create a pod mounting the host IPC namespace.

```yaml
kubectl --kubeconfig alice apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-host-ipc
  namespace: oil-production
spec:
  hostIPC: true
  containers:
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep", "3600"]
EOF
```

The pod must be blocked by the `PodSecurityPolicy`.

**Cleanup:**
As cluster admin, delete all the created resources

```bash
kubectl --kubeconfig cluster-admin delete tenant oil
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
```
136
docs/operator/mtb/block-use-of-host-networking-and-ports.md
Normal file
@@ -0,0 +1,136 @@
|
||||
# Block use of host networking and ports
|
||||
|
||||
**Profile Applicability:** L1
|
||||
|
||||
**Type:** Behavioral Check
|
||||
|
||||
**Category:** Host Isolation
|
||||
|
||||
**Description:** Tenants should not be allowed to use host networking and host ports for their workloads.
|
||||
|
||||
**Rationale:** Using `hostPort` and `hostNetwork` allows tenants workloads to share the host networking stack allowing potential snooping of network traffic across application pods.
|
||||
|
||||
**Audit:**
|
||||
|
||||
As cluster admin, define a `PodSecurityPolicy` that restricts `hostPort` and `hostNetwork` and map the policy to a tenant:
|
||||
|
||||
```yaml
|
||||
kubectl create -f - << EOF
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
privileged: false
|
||||
# Required to prevent escalations to root.
|
||||
allowPrivilegeEscalation: false
|
||||
hostNetwork: false
|
||||
hostPorts: [] # empty means no allowed host ports
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: RunAsAny
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
EOF
|
||||
```
|
||||
|
||||
> Note: make sure `PodSecurityPolicy` Admission Control is enabled on the APIs server: `--enable-admission-plugins=PodSecurityPolicy`
|
||||
|
||||
Then create a ClusterRole using or granting the said item
|
||||
|
||||
```yaml
|
||||
kubectl create -f - << EOF
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: tenant:psp
|
||||
rules:
|
||||
- apiGroups: ['policy']
|
||||
resources: ['podsecuritypolicies']
|
||||
resourceNames: ['tenant']
|
||||
verbs: ['use']
|
||||
EOF
|
||||
```
|
||||
|
||||
And assign it to the tenant
|
||||
|
||||
```yaml
|
||||
kubectl apply -f - << EOF
|
||||
apiVersion: capsule.clastix.io/v1beta1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
namespace: oil-production
|
||||
spec:
|
||||
owners:
|
||||
- kind: User
|
||||
name: alice
|
||||
additionalRoleBindings:
|
||||
- clusterRoleName: tenant:psp
|
||||
subjects:
|
||||
- kind: "Group"
|
||||
apiGroup: "rbac.authorization.k8s.io"
|
||||
name: "system:authenticated"
|
||||
EOF
|
||||
|
||||
./create-user.sh alice oil
|
||||
```
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a pod using `hostNetwork`
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pod-with-hostnetwork
|
||||
namespace: oil-production
|
||||
spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:latest
|
||||
ports:
|
||||
- containerPort: 80
|
||||
EOF
|
||||
```
|
||||
|
||||
As tenant owner, create a pod defining a container using `hostPort`
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pod-with-hostport
|
||||
namespace: oil-production
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:latest
|
||||
ports:
|
||||
- containerPort: 80
|
||||
hostPort: 9090
|
||||
EOF
|
||||
```
|
||||
|
||||
In both cases above, the pod must be blocked by the `PodSecurityPolicy`.
|
||||
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
|
||||
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
|
||||
```
|
||||
129
docs/operator/mtb/block-use-of-host-path-volumes.md
Normal file
@@ -0,0 +1,129 @@
|
||||
# Block use of host path volumes
|
||||
|
||||
**Profile Applicability:** L1
|
||||
|
||||
**Type:** Behavioral Check
|
||||
|
||||
**Category:** Host Protection
|
||||
|
||||
**Description:** Tenants should not be able to mount host volumes and directories.
|
||||
|
||||
**Rationale:** Host volumes and directories can be used to access shared data or escalate privileges, and they also create a tight coupling between a tenant workload and a host.
|
||||
|
||||
**Audit:**
|
||||
|
||||
As cluster admin, define a `PodSecurityPolicy` that restricts `hostPath` volumes and map the policy to a tenant:
|
||||
|
||||
```yaml
|
||||
kubectl create -f - << EOF
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
privileged: false
|
||||
# Required to prevent escalations to root.
|
||||
allowPrivilegeEscalation: false
|
||||
volumes: # hostPath is not permitted
|
||||
- 'configMap'
|
||||
- 'emptyDir'
|
||||
- 'projected'
|
||||
- 'secret'
|
||||
- 'downwardAPI'
|
||||
- 'persistentVolumeClaim'
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: RunAsAny
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
EOF
|
||||
```
|
||||
|
||||
> Note: make sure the `PodSecurityPolicy` Admission Controller is enabled on the API server: `--enable-admission-plugins=PodSecurityPolicy`
|
||||
|
||||
Then create a ClusterRole granting the use of the said policy
|
||||
|
||||
```yaml
|
||||
kubectl create -f - << EOF
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: tenant:psp
|
||||
rules:
|
||||
- apiGroups: ['policy']
|
||||
resources: ['podsecuritypolicies']
|
||||
resourceNames: ['tenant']
|
||||
verbs: ['use']
|
||||
EOF
|
||||
```
|
||||
|
||||
And assign it to the tenant
|
||||
|
||||
```yaml
|
||||
kubectl apply -f - << EOF
|
||||
apiVersion: capsule.clastix.io/v1beta1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owners:
|
||||
- kind: User
|
||||
name: alice
|
||||
additionalRoleBindings:
|
||||
- clusterRoleName: tenant:psp
|
||||
subjects:
|
||||
- kind: "Group"
|
||||
apiGroup: "rbac.authorization.k8s.io"
|
||||
name: "system:authenticated"
|
||||
EOF
|
||||
|
||||
./create-user.sh alice oil
|
||||
```
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a pod defining a volume of type `hostPath`.
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pod-with-hostpath-volume
|
||||
namespace: oil-production
|
||||
spec:
|
||||
containers:
|
||||
- name: busybox
|
||||
image: busybox:latest
|
||||
command: ["/bin/sleep", "3600"]
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: volume
|
||||
volumes:
|
||||
- name: volume
|
||||
hostPath:
|
||||
# directory location on host
|
||||
path: /data
|
||||
type: Directory
|
||||
EOF
|
||||
```
|
||||
|
||||
The pod must be blocked by the `PodSecurityPolicy`.
|
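As a counter-check (not part of the original procedure), the same pod using one of the allowed volume types, such as `emptyDir`, should instead be admitted:

```yaml
kubectl --kubeconfig alice apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-emptydir-volume
  namespace: oil-production
spec:
  containers:
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep", "3600"]
    volumeMounts:
    - mountPath: /tmp
      name: volume
  volumes:
  - name: volume
    emptyDir: {}
EOF
```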
||||
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
|
||||
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
|
||||
```
|
||||
115
docs/operator/mtb/block-use-of-host-pid.md
Normal file
@@ -0,0 +1,115 @@
|
||||
# Block use of host PID
|
||||
|
||||
**Profile Applicability:** L1
|
||||
|
||||
**Type:** Behavioral Check
|
||||
|
||||
**Category:** Host Isolation
|
||||
|
||||
**Description:** Tenants should not be allowed to share the host process ID (PID) namespace.
|
||||
|
||||
**Rationale:** The `hostPID` setting allows pods to share the host process ID namespace, allowing potential privilege escalation. Tenant pods should not be allowed to share the host PID namespace.
|
||||
|
||||
**Audit:**
|
||||
|
||||
As cluster admin, define a `PodSecurityPolicy` that restricts `hostPID` usage and map the policy to a tenant:
|
||||
|
||||
```yaml
|
||||
kubectl create -f - << EOF
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
privileged: false
|
||||
# Required to prevent escalations to root.
|
||||
allowPrivilegeEscalation: false
|
||||
hostPID: false
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: RunAsAny
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
EOF
|
||||
```
|
||||
|
||||
> Note: make sure the `PodSecurityPolicy` Admission Controller is enabled on the API server: `--enable-admission-plugins=PodSecurityPolicy`
|
||||
|
||||
Then create a ClusterRole granting the use of the said policy
|
||||
|
||||
```yaml
|
||||
kubectl create -f - << EOF
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: tenant:psp
|
||||
rules:
|
||||
- apiGroups: ['policy']
|
||||
resources: ['podsecuritypolicies']
|
||||
resourceNames: ['tenant']
|
||||
verbs: ['use']
|
||||
EOF
|
||||
```
|
||||
|
||||
And assign it to the tenant
|
||||
|
||||
```yaml
|
||||
kubectl apply -f - << EOF
|
||||
apiVersion: capsule.clastix.io/v1beta1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owners:
|
||||
- kind: User
|
||||
name: alice
|
||||
additionalRoleBindings:
|
||||
- clusterRoleName: tenant:psp
|
||||
subjects:
|
||||
- kind: "Group"
|
||||
apiGroup: "rbac.authorization.k8s.io"
|
||||
name: "system:authenticated"
|
||||
EOF
|
||||
|
||||
./create-user.sh alice oil
|
||||
```
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a pod mounting the host PID namespace.
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pod-with-host-pid
|
||||
namespace: oil-production
|
||||
spec:
|
||||
hostPID: true
|
||||
containers:
|
||||
- name: busybox
|
||||
image: busybox:latest
|
||||
command: ["/bin/sleep", "3600"]
|
||||
EOF
|
||||
```
|
||||
|
||||
The pod must be blocked by the `PodSecurityPolicy`.
|
||||
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
|
||||
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
|
||||
```
|
||||
75
docs/operator/mtb/block-use-of-nodeport-services.md
Normal file
@@ -0,0 +1,75 @@
|
||||
# Block use of NodePort services
|
||||
|
||||
**Profile Applicability:** L1
|
||||
|
||||
**Type:** Behavioral Check
|
||||
|
||||
**Category:** Host Isolation
|
||||
|
||||
**Description:** Tenants should not be able to create services of type NodePort.
|
||||
|
||||
**Rationale:** The `NodePort` service type configures host ports that cannot be secured using Kubernetes network policies and requires upstream firewalls. Also, multiple tenants cannot use the same host port numbers.
|
||||
|
||||
**Audit:**
|
||||
|
||||
As cluster admin, create a tenant
|
||||
|
||||
```yaml
|
||||
kubectl create -f - << EOF
|
||||
apiVersion: capsule.clastix.io/v1beta1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
enableNodePorts: false
|
||||
owners:
|
||||
- kind: User
|
||||
name: alice
|
||||
EOF
|
||||
|
||||
./create-user.sh alice oil
|
||||
```
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a service of type `NodePort` in the tenant namespace
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: nginx
|
||||
labels:
|
||||
namespace: oil-production
|
||||
spec:
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
targetPort: 80
|
||||
selector:
|
||||
run: nginx
|
||||
type: NodePort
|
||||
EOF
|
||||
```
|
||||
|
||||
You must receive an error message denying the request:
|
||||
|
||||
```
|
||||
Error from server (NodePort service types are forbidden for the tenant:
|
||||
error when creating "STDIN": admission webhook "services.capsule.clastix.io" denied the request:
|
||||
NodePort service types are forbidden for the tenant: please, reach out to the system administrators
|
||||
```
|
||||
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
```
|
||||
66
docs/operator/mtb/configure-namespace-object-limits.md
Normal file
@@ -0,0 +1,66 @@
|
||||
# Configure namespace object limits
|
||||
|
||||
**Profile Applicability:** L1
|
||||
|
||||
**Type:** Configuration
|
||||
|
||||
**Category:** Fairness
|
||||
|
||||
**Description:** Namespace resource quotas should be used to allocate, track, and limit the number of objects of a particular type that can be created within a namespace.
|
||||
|
||||
**Rationale:** Resource quotas must be configured for each tenant namespace, to guarantee isolation and fairness across tenants.
|
||||
|
||||
**Audit:**
|
||||
|
||||
As cluster admin, create a tenant
|
||||
|
||||
```yaml
|
||||
kubectl create -f - <<EOF
|
||||
apiVersion: capsule.clastix.io/v1beta1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owners:
|
||||
- kind: User
|
||||
name: alice
|
||||
resourceQuotas:
|
||||
items:
|
||||
- hard:
|
||||
pods: 100
|
||||
services: 50
|
||||
services.loadbalancers: 3
|
||||
services.nodeports: 20
|
||||
persistentvolumeclaims: 100
|
||||
EOF
|
||||
|
||||
./create-user.sh alice oil
|
||||
|
||||
```
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, retrieve the configured quotas in the tenant namespace:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig alice get quota
|
||||
NAME AGE REQUEST LIMIT
|
||||
capsule-oil-0 23s persistentvolumeclaims: 0/100,
|
||||
pods: 0/100, services: 0/50,
|
||||
services.loadbalancers: 0/3,
|
||||
services.nodeports: 0/20
|
||||
```
|
||||
|
||||
Make sure that a quota is configured for API objects: `PersistentVolumeClaim`, `LoadBalancer`, `NodePort`, `Pods`, etc.
|
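As an optional check, not part of the original procedure, you can verify that the object quota is actually enforced: once a hard limit is reached, further creations are rejected by the `ResourceQuota` admission controller. A hypothetical example, assuming the `services` quota is already exhausted:

```bash
kubectl --kubeconfig alice create service clusterip probe --tcp=80:80
# Expected error, similar to:
#   Error from server (Forbidden): services "probe" is forbidden:
#   exceeded quota: capsule-oil-0, requested: services=1, used: services=50, limited: services=50
```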
||||
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
```
|
||||
65
docs/operator/mtb/configure-namespace-resource-quotas.md
Normal file
@@ -0,0 +1,65 @@
|
||||
# Configure namespace resource quotas
|
||||
|
||||
**Profile Applicability:** L1
|
||||
|
||||
**Type:** Configuration
|
||||
|
||||
**Category:** Fairness
|
||||
|
||||
**Description:** Namespace resource quotas should be used to allocate, track, and limit a tenant's use of shared resources.
|
||||
|
||||
**Rationale:** Resource quotas must be configured for each tenant namespace, to guarantee isolation and fairness across tenants.
|
||||
|
||||
**Audit:**
|
||||
|
||||
As cluster admin, create a tenant
|
||||
|
||||
```yaml
|
||||
kubectl create -f - <<EOF
|
||||
apiVersion: capsule.clastix.io/v1beta1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owners:
|
||||
- kind: User
|
||||
name: alice
|
||||
resourceQuotas:
|
||||
items:
|
||||
- hard:
|
||||
limits.cpu: "8"
|
||||
limits.memory: 16Gi
|
||||
requests.cpu: "8"
|
||||
requests.memory: 16Gi
|
||||
- hard:
|
||||
requests.storage: 100Gi
|
||||
EOF
|
||||
|
||||
./create-user.sh alice oil
|
||||
|
||||
```
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, retrieve the configured quotas in the tenant namespace:
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig alice get quota
|
||||
NAME AGE REQUEST LIMIT
|
||||
capsule-oil-0 24s requests.cpu: 0/8, requests.memory: 0/16Gi limits.cpu: 0/8, limits.memory: 0/16Gi
|
||||
capsule-oil-1   24s   requests.storage: 0/100Gi
|
||||
```
|
||||
|
||||
Make sure that a quota is configured for CPU, memory, and storage resources.
|
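As an optional check, not in the original guide, a pod requesting more CPU than the configured quota should be rejected; note also that, with compute quotas in place, pods must declare explicit requests and limits to be admitted at all:

```yaml
kubectl --kubeconfig alice apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
  name: cpu-probe
  namespace: oil-production
spec:
  containers:
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep", "3600"]
    resources:
      requests:
        cpu: "16"      # exceeds the 8-CPU hard quota on purpose
        memory: 128Mi
      limits:
        cpu: "16"
        memory: 128Mi
EOF
```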
||||
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
```
|
||||
71
docs/operator/mtb/require-always-imagepullpolicy.md
Normal file
@@ -0,0 +1,71 @@
|
||||
# Require always imagePullPolicy
|
||||
|
||||
**Profile Applicability:** L1
|
||||
|
||||
**Type:** Configuration Check
|
||||
|
||||
**Category:** Data Isolation
|
||||
|
||||
**Description:** Set the image pull policy to Always for tenant workloads.
|
||||
|
||||
**Rationale:** Tenants have to be assured that their private images can only be used by those who have the credentials to pull them.
|
||||
|
||||
**Audit:**
|
||||
|
||||
As cluster admin, create a tenant
|
||||
|
||||
```yaml
|
||||
kubectl create -f - << EOF
|
||||
apiVersion: capsule.clastix.io/v1beta1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
imagePullPolicies:
|
||||
- Always
|
||||
owners:
|
||||
- kind: User
|
||||
name: alice
|
||||
EOF
|
||||
|
||||
./create-user.sh alice oil
|
||||
```
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a pod in the tenant namespace with `imagePullPolicy=IfNotPresent`
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx
|
||||
namespace: oil-production
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
EOF
|
||||
```
|
||||
|
||||
You must receive an error message denying the request:
|
||||
|
||||
```
|
||||
Error from server
|
||||
(ImagePullPolicy IfNotPresent for container nginx is forbidden, use one of the followings: Always): error when creating "STDIN": admission webhook "pods.capsule.clastix.io" denied the request:
|
||||
ImagePullPolicy IfNotPresent for container nginx is forbidden, use one of the followings: Always
|
||||
```
|
||||
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
```
|
||||
124
docs/operator/mtb/require-persistentvolumeclaim-for-storage.md
Normal file
@@ -0,0 +1,124 @@
|
||||
# Require PersistentVolumeClaim for storage
|
||||
|
||||
**Profile Applicability:** L1
|
||||
|
||||
**Type:** Behavioral Check
|
||||
|
||||
**Category:** na
|
||||
|
||||
**Description:** Tenants should not be able to use any volume type except `PersistentVolumeClaim`.
|
||||
|
||||
**Rationale:** In some scenarios, it may be required to disallow the usage of any core volume type except PVCs.
|
||||
|
||||
**Audit:**
|
||||
|
||||
As cluster admin, define a `PodSecurityPolicy` allowing only `PersistentVolumeClaim` volumes and map the policy to a tenant:
|
||||
|
||||
```yaml
|
||||
kubectl create -f - << EOF
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
privileged: false
|
||||
# Required to prevent escalations to root.
|
||||
allowPrivilegeEscalation: false
|
||||
volumes:
|
||||
- 'persistentVolumeClaim'
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: RunAsAny
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
EOF
|
||||
```
|
||||
|
||||
> Note: make sure the `PodSecurityPolicy` Admission Controller is enabled on the API server: `--enable-admission-plugins=PodSecurityPolicy`
|
||||
|
||||
Then create a ClusterRole granting the use of the said policy
|
||||
|
||||
```yaml
|
||||
kubectl create -f - << EOF
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: tenant:psp
|
||||
rules:
|
||||
- apiGroups: ['policy']
|
||||
resources: ['podsecuritypolicies']
|
||||
resourceNames: ['tenant']
|
||||
verbs: ['use']
|
||||
EOF
|
||||
```
|
||||
|
||||
And assign it to the tenant
|
||||
|
||||
```yaml
|
||||
kubectl apply -f - << EOF
|
||||
apiVersion: capsule.clastix.io/v1beta1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owners:
|
||||
- kind: User
|
||||
name: alice
|
||||
additionalRoleBindings:
|
||||
- clusterRoleName: tenant:psp
|
||||
subjects:
|
||||
- kind: "Group"
|
||||
apiGroup: "rbac.authorization.k8s.io"
|
||||
name: "system:authenticated"
|
||||
EOF
|
||||
|
||||
./create-user.sh alice oil
|
||||
```
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a pod defining a volume of any core type other than `PersistentVolumeClaim`. For example:
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pod-with-hostpath-volume
|
||||
namespace: oil-production
|
||||
spec:
|
||||
containers:
|
||||
- name: busybox
|
||||
image: busybox:latest
|
||||
command: ["/bin/sleep", "3600"]
|
||||
volumeMounts:
|
||||
- mountPath: /tmp
|
||||
name: volume
|
||||
volumes:
|
||||
- name: volume
|
||||
hostPath:
|
||||
# directory location on host
|
||||
path: /data
|
||||
type: Directory
|
||||
EOF
|
||||
```
|
||||
|
||||
The pod must be blocked by the `PodSecurityPolicy`.
|
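As a counter-check (not part of the original procedure), the same pod backed by a `persistentVolumeClaim` volume, the only type allowed by the policy, should instead be admitted, assuming a claim named `pvc` already exists in the namespace:

```yaml
kubectl --kubeconfig alice apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-pvc-volume
  namespace: oil-production
spec:
  containers:
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep", "3600"]
    volumeMounts:
    - mountPath: /tmp
      name: volume
  volumes:
  - name: volume
    persistentVolumeClaim:
      claimName: pvc
EOF
```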
||||
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
|
||||
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
|
||||
```
|
||||
87
docs/operator/mtb/require-reclaim-policy-of-delete.md
Normal file
@@ -0,0 +1,87 @@
|
||||
# Require PV reclaim policy of delete
|
||||
|
||||
**Profile Applicability:** L1
|
||||
|
||||
**Type:** Configuration Check
|
||||
|
||||
**Category:** Data Isolation
|
||||
|
||||
**Description:** Force a tenant to use a Storage Class with `reclaimPolicy=Delete`.
|
||||
|
||||
**Rationale:** Tenants have to be assured that their Persistent Volumes cannot be reclaimed by other tenants.
|
||||
|
||||
**Audit:**
|
||||
|
||||
As cluster admin, create a Storage Class with `reclaimPolicy=Delete`
|
||||
|
||||
```yaml
|
||||
kubectl create -f - << EOF
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: delete-policy
|
||||
reclaimPolicy: Delete
|
||||
provisioner: clastix.io/nfs
|
||||
EOF
|
||||
```
|
||||
|
||||
As cluster admin, create a tenant and assign the above Storage Class
|
||||
|
||||
```yaml
|
||||
kubectl create -f - << EOF
|
||||
apiVersion: capsule.clastix.io/v1beta1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owners:
|
||||
- kind: User
|
||||
name: alice
|
||||
storageClasses:
|
||||
allowed:
|
||||
- delete-policy
|
||||
EOF
|
||||
|
||||
./create-user.sh alice oil
|
||||
```
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a Persistent Volume Claim in the tenant namespace omitting the Storage Class or using any other Storage Class:
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: pvc
|
||||
namespace: oil-production
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 12Gi
|
||||
EOF
|
||||
```
|
||||
|
||||
You must receive an error message denying the request:
|
||||
|
||||
```
|
||||
Error from server (A valid Storage Class must be used, one of the following (delete-policy)):
|
||||
error when creating "STDIN": admission webhook "pvc.capsule.clastix.io" denied the request:
|
||||
A valid Storage Class must be used, one of the following (delete-policy)
|
||||
```
|
||||
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete storageclass delete-policy
|
||||
```
|
||||
119
docs/operator/mtb/require-run-as-non-root-user.md
Normal file
@@ -0,0 +1,119 @@
|
||||
# Require run as non-root user
|
||||
|
||||
**Profile Applicability:** L1
|
||||
|
||||
**Type:** Behavioral Check
|
||||
|
||||
**Category:** Control Plane Isolation
|
||||
|
||||
**Description:** Control container permissions.
|
||||
|
||||
**Rationale:** Processes in containers run as the root user (uid 0), by default. To prevent potential compromise of container hosts, specify a least privileged user ID when building the container image and require that application containers run as non-root users.
|
||||
|
||||
**Audit:**
|
||||
|
||||
As cluster admin, define a `PodSecurityPolicy` with `runAsUser=MustRunAsNonRoot` and map the policy to a tenant:
|
||||
|
||||
```yaml
|
||||
kubectl create -f - << EOF
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
privileged: false
|
||||
# Required to prevent escalations to root.
|
||||
allowPrivilegeEscalation: false
|
||||
runAsUser:
|
||||
# Require the container to run without root privileges.
|
||||
rule: MustRunAsNonRoot
|
||||
supplementalGroups:
|
||||
rule: MustRunAs
|
||||
ranges:
|
||||
# Forbid adding the root group.
|
||||
- min: 1
|
||||
max: 65535
|
||||
fsGroup:
|
||||
rule: MustRunAs
|
||||
ranges:
|
||||
# Forbid adding the root group.
|
||||
- min: 1
|
||||
max: 65535
|
||||
EOF
|
||||
```
|
||||
|
||||
> Note: make sure the `PodSecurityPolicy` Admission Controller is enabled on the API server: `--enable-admission-plugins=PodSecurityPolicy`
|
||||
|
||||
Then create a ClusterRole granting the use of the said policy
|
||||
|
||||
```yaml
|
||||
kubectl create -f - << EOF
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: tenant:psp
|
||||
rules:
|
||||
- apiGroups: ['policy']
|
||||
resources: ['podsecuritypolicies']
|
||||
resourceNames: ['tenant']
|
||||
verbs: ['use']
|
||||
EOF
|
||||
```
|
||||
|
||||
And assign it to the tenant
|
||||
|
||||
```yaml
|
||||
kubectl apply -f - << EOF
|
||||
apiVersion: capsule.clastix.io/v1beta1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owners:
|
||||
- kind: User
|
||||
name: alice
|
||||
additionalRoleBindings:
|
||||
- clusterRoleName: tenant:psp
|
||||
subjects:
|
||||
- kind: "Group"
|
||||
apiGroup: "rbac.authorization.k8s.io"
|
||||
name: "system:authenticated"
|
||||
EOF
|
||||
|
||||
./create-user.sh alice oil
|
||||
```
|
||||
|
||||
As tenant owner, run the following command to create a namespace in the given tenant
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig alice create ns oil-production
|
||||
kubectl --kubeconfig alice config set-context --current --namespace oil-production
|
||||
```
|
||||
|
||||
As tenant owner, create a pod whose `securityContext` neither sets `runAsNonRoot` to `true` nor sets `runAsUser` to a non-zero value.
|
||||
|
||||
```yaml
|
||||
kubectl --kubeconfig alice apply -f - << EOF
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pod-run-as-root
|
||||
namespace: oil-production
|
||||
spec:
|
||||
containers:
|
||||
- name: busybox
|
||||
image: busybox:latest
|
||||
command: ["/bin/sleep", "3600"]
|
||||
EOF
|
||||
```
|
||||
|
||||
The pod must be blocked by the `PodSecurityPolicy`.
|
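As a counter-check (not part of the original procedure), the same pod with a non-root `securityContext` should instead be admitted:

```yaml
kubectl --kubeconfig alice apply -f - << EOF
apiVersion: v1
kind: Pod
metadata:
  name: pod-run-as-nonroot
  namespace: oil-production
spec:
  securityContext:
    runAsNonRoot: true
    runAsUser: 1000   # any non-zero UID satisfies MustRunAsNonRoot
  containers:
  - name: busybox
    image: busybox:latest
    command: ["/bin/sleep", "3600"]
EOF
```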
||||
|
||||
**Cleanup:**
|
||||
As cluster admin, delete all the created resources
|
||||
|
||||
```bash
|
||||
kubectl --kubeconfig cluster-admin delete tenant oil
|
||||
kubectl --kubeconfig cluster-admin delete PodSecurityPolicy tenant
|
||||
kubectl --kubeconfig cluster-admin delete ClusterRole tenant:psp
|
||||
```
|
||||
30
docs/operator/mtb/sig-multitenancy-bench.md
Normal file
@@ -0,0 +1,30 @@
|
||||
# Meet the multi-tenancy benchmark MTB
|
||||
There is not yet a real standard for the multi-tenancy model in Kubernetes, although the [SIG multi-tenancy group](https://github.com/kubernetes-sigs/multi-tenancy) is working on that. SIG multi-tenancy drafted a generic validation schema applicable to generic multi-tenancy projects. Multi-Tenancy Benchmarks [MTB](https://github.com/kubernetes-sigs/multi-tenancy/tree/master/benchmarks) are guidelines for the multi-tenant configuration of Kubernetes clusters. Capsule is an open source multi-tenancy operator and we decided to meet the requirements of MTB.
|
||||
|
||||
> N.B. At the time of writing, the MTB is in development and not ready for usage. Strictly speaking, we do not claim official conformance to MTB, but just adherence to the multi-tenancy requirements and best practices promoted by MTB.
|
||||
|
||||
|MTB Benchmark |MTB Profile|Capsule Version|Conformance|Notes |
|
||||
|--------------|-----------|---------------|-----------|-------|
|
||||
|[Block access to cluster resources](block-access-to-cluster-resources.md)|L1|v0.1.0|✓|---|
|
||||
|[Block access to multitenant resources](block-access-to-multitenant-resources.md)|L1|v0.1.0|✓|---|
|
||||
|[Block access to other tenant resources](block-access-to-other-tenant-resources.md)|L1|v0.1.0|✓|MTB draft|
|
||||
|[Block add capabilities](block-add-capabilities.md)|L1|v0.1.0|✓|---|
|
||||
|[Require always imagePullPolicy](require-always-imagepullpolicy.md)|L1|v0.1.0|✓|---|
|
||||
|[Require run as non-root user](require-run-as-non-root-user.md)|L1|v0.1.0|✓|---|
|
||||
|[Block privileged containers](block-privileged-containers.md)|L1|v0.1.0|✓|---|
|
||||
|[Block privilege escalation](block-privilege-escalation.md)|L1|v0.1.0|✓|---|
|
||||
|[Configure namespace resource quotas](configure-namespace-resource-quotas.md)|L1|v0.1.0|✓|---|
|
||||
|[Block modification of resource quotas](block-modification-of-resource-quotas.md)|L1|v0.1.0|✓|---|
|
||||
|[Configure namespace object limits](configure-namespace-object-limits.md)|L1|v0.1.0|✓|---|
|
||||
|[Block use of host path volumes](block-use-of-host-path-volumes.md)|L1|v0.1.0|✓|---|
|
||||
|[Block use of host networking and ports](block-use-of-host-networking-and-ports.md)|L1|v0.1.0|✓|---|
|
||||
|[Block use of host PID](block-use-of-host-pid.md)|L1|v0.1.0|✓|---|
|
||||
|[Block use of host IPC](block-use-of-host-ipc.md)|L1|v0.1.0|✓|---|
|
||||
|[Block use of NodePort services](block-use-of-nodeport-services.md)|L1|v0.1.0|✓|---|
|
||||
|[Require PersistentVolumeClaim for storage](require-persistentvolumeclaim-for-storage.md)|L1|v0.1.0|✓|MTB draft|
|
||||
|[Require PV reclaim policy of delete](require-reclaim-policy-of-delete.md)|L1|v0.1.0|✓|MTB draft|
|
||||
|[Block use of existing PVs](block-use-of-existing-persistent-volumes.md)|L1|v0.1.0|✓|MTB draft|
|
||||
|[Block network access across tenant namespaces](block-network-access-across-tenant-namespaces.md)|L1|v0.1.0|✓|MTB draft|
|
||||
|[Allow self-service management of Network Policies](allow-self-service-management-of-network-policies.md)|L2|v0.1.0|✓|---|
|
||||
|[Allow self-service management of Roles](allow-self-service-management-of-roles.md)|L2|v0.1.0|✓|MTB draft|
|
||||
|[Allow self-service management of Role Bindings](allow-self-service-management-of-rolebindings.md)|L2|v0.1.0|✓|MTB draft|
|
||||
@@ -1,41 +1,9 @@
|
||||
# Kubernetes multi-tenancy made simple
|
||||
**Capsule** helps to implement a multi-tenancy and policy-based environment in your Kubernetes cluster. It is not intended to be yet another _PaaS_, instead, it has been designed as a micro-services based ecosystem with minimalist approach, leveraging only on upstream Kubernetes.
|
||||
|
||||
# What's the problem with the current status?
|
||||
Kubernetes introduces the _Namespace_ object type to create logical partitions of the cluster as isolated *slices*. However, implementing advanced multi-tenancy scenarios soon becomes complicated because of the flat structure of Kubernetes namespaces and the impossibility of sharing resources among namespaces belonging to the same tenant. To overcome this, cluster admins tend to provision a dedicated cluster for each group of users, teams, or departments. As an organization grows, the number of clusters to manage and keep aligned becomes an operational nightmare, described as the well-known phenomenon of _clusters sprawl_.
|
||||
|
||||
# Entering Capsule
|
||||
Capsule takes a different approach. In a single cluster, it aggregates multiple namespaces in a lightweight abstraction called _Tenant_. Within each tenant, users are free to create their namespaces and share all the assigned resources while a Policy Engine keeps different tenants isolated from each other. The _Network and Security Policies_, _Resource Quota_, _Limit Ranges_, _RBAC_, and other policies defined at the tenant level are automatically inherited by all the namespaces in the tenant. And users are free to operate their tenants in autonomy, without the intervention of the cluster administrator.
|
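A minimal sketch of the _Tenant_ abstraction described above, using the same `capsule.clastix.io/v1beta1` shape shown throughout these docs:

```yaml
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
  name: oil
spec:
  owners:
  - kind: User
    name: alice
```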
||||
|
||||
# Features
|
||||
## Self-Service
|
||||
Leave to developers the freedom to self-provision their cluster resources according to the assigned boundaries.
|
||||
|
||||
## Preventing Clusters Sprawl
|
||||
Share a single cluster with multiple teams, groups of users, or departments by saving operational and management efforts.
|
||||
|
||||
## Governance
|
||||
Leverage Kubernetes Admission Controllers to enforce the industry security best practices and meet legal requirements.
|
||||
|
||||
## Resources Control
|
||||
Take control of the resources consumed by users while preventing them from exceeding the assigned boundaries.
|
||||
|
||||
## Native Experience
|
||||
Provide multi-tenancy with a native Kubernetes experience without introducing additional management layers, plugins, or customised binaries.
|
||||
|
||||
## GitOps ready
|
||||
Capsule is completely declarative and GitOps ready.
|
||||
|
||||
## Bring your own device (BYOD)
|
||||
Assign to tenants a dedicated set of compute, storage, and network resources and avoid the noisy neighbors' effect.
|
||||
|
||||
# Common use cases for Capsule
|
||||
Please, refer to the corresponding [section](./use-cases/overview.md) in the project documentation for a detailed list of common use cases that Capsule can address.
|
||||
|
||||
# What’s next
|
||||
Have a fun with Capsule:
|
||||
**Capsule** helps to implement a multi-tenancy and policy-based environment in your Kubernetes cluster. Have fun with Capsule:
|
||||
|
||||
* [Getting Started](./getting-started.md)
|
||||
* [Use Cases](./use-cases/overview.md)
|
||||
* [SIG Multi-tenancy benchmark](./mtb/sig-multitenancy-bench.md)
|
||||
* [Run on Managed Kubernetes Services](./managed-kubernetes/getting-started-amazon-eks.md)
|
||||
* [Contributing](./contributing.md)
|
||||
* [References](./references.md)
|
||||
@@ -688,8 +688,6 @@ spec:
|
||||
userGroups: ["capsule.clastix.io"]
|
||||
forceTenantPrefix: false
|
||||
protectedNamespaceRegex: ""
|
||||
allowTenantIngressHostnamesCollision: false
|
||||
allowIngressHostnameCollision: false
|
||||
```
|
||||
|
||||
Option | Description | Default
|
||||
@@ -697,8 +695,6 @@ Option | Description | Default
|
||||
`.spec.forceTenantPrefix` | Force the tenant name as prefix for namespaces: `<tenant_name>-<namespace>`. | `false`
|
||||
`.spec.userGroups` | Array of Capsule groups to which all tenant owners must belong. | `[capsule.clastix.io]`
|
||||
`.spec.protectedNamespaceRegex` | Disallows creation of namespaces matching the passed regexp. | `null`
|
||||
`.spec.allowTenantIngressHostnamesCollision` | By default, Capsule allows Ingress hostname collision: set to `false` to enforce this policy. | `true`
|
||||
`.spec.allowIngressHostnameCollision` | Toggling this, Capsule will not check if a hostname collision is in place, allowing the creation of two or more Tenant resources although sharing the same allowed hostname(s). | `false`
|
||||
|
||||
Upon installation using Kustomize or Helm, a `default` resource will be created.
|
||||
The reference to this configuration is managed by the CLI flag `--configuration-name`.
|
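Assuming the configuration is exposed as a cluster-scoped custom resource named after the default mentioned above (the resource name is an assumption; adjust it to your Capsule version), it can be inspected with:

```bash
kubectl get capsuleconfigurations default -o yaml
```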
||||
|
||||
@@ -23,11 +23,11 @@ spec:
|
||||
kind: User
|
||||
```
|
||||
|
||||
> If you need to address specific use-case, the said annotation supports multiple values comme separated
|
||||
>
|
||||
> ```yaml
|
||||
> capsule.clastix.io/allowed-image-pull-policy: Always,IfNotPresent
|
||||
> ```
|
||||
If you need to address a specific use-case, the said annotation supports multiple comma-separated values
|
||||
|
||||
```yaml
|
||||
capsule.clastix.io/allowed-image-pull-policy: Always,IfNotPresent
|
||||
```
|
||||
|
||||
# What’s next
|
||||
|
||||
|
||||
@@ -85,14 +85,14 @@ capsule-oil-0 <none> 42h
|
||||
production-network-policy <none> 3m
|
||||
```
|
||||
|
||||
an delete the namespace network-policies
|
||||
And delete the namespace network policies
|
||||
|
||||
```
|
||||
alice@caas# kubectl -n oil-production delete networkpolicy production-network-policy
|
||||
```
|
||||
|
||||
|
||||
However, the Capsule controller prevents Alice to delete the tenant network policy:
|
||||
However, the Capsule controller prevents Alice from deleting the tenant network policy:
|
||||
|
||||
```
|
||||
alice@caas# kubectl -n oil-production delete networkpolicy capsule-oil-0
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# Assign permissions
|
||||
Alice acts as the tenant admin. Other users can operate inside the tenant with different levels of permissions and authorizations. Alice is responsible for creating additional roles and assigning these roles to other users to work in the same tenant.
|
||||
|
||||
One of the key design principles of the Capsule is the self-provisioning management from the tenant owner's perspective. Alice, the tenant owner, does not need to interact with Bill, the cluster admin, to complete her day-by-day duties. On the other side, Bill has not to deal with multiple requests coming from multiple tenant owners that probably will overwhelm him.
|
||||
One of the key design principles of the Capsule is the self-provisioning management from the tenant owner's perspective. Alice, the tenant owner, does not need to interact with Bill, the cluster admin, to complete her day-by-day duties. On the other side, Bill does not have to deal with multiple requests coming from multiple tenant owners that probably will overwhelm him.
|
||||
|
||||
Capsule leaves Alice the freedom to create RBAC roles at the namespace level, or using the pre-defined cluster roles already available in Kubernetes, and assign them to other users in the tenant. Since roles and rolebindings are limited to a namespace scope, Alice can assign the roles to the other users accessing the same tenant only after the namespace is created. This gives Alice the power to administer the tenant without the intervention of the cluster admin.
|
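As an illustrative sketch (user and binding names are hypothetical), Alice could grant the built-in `view` cluster role to another user in one of her namespaces without involving the cluster admin:

```bash
kubectl --kubeconfig alice -n oil-production create rolebinding bob-view \
  --clusterrole=view --user=bob
```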
||||
|
||||
|
||||
@@ -70,7 +70,7 @@ roleRef:
|
||||
name: 'psp:privileged'
|
||||
```
|
||||
|
||||
With the above example, Capsule is forbidding to any authenticated user in `oil-production` namespace to run privileged pods and let them to performs privilege escalation as declared by the Cluster Role `psp:privileged`.
|
||||
With the above example, Capsule is forbidding any authenticated user in `oil-production` namespace to run privileged pods and to perform privilege escalation as declared by the Cluster Role `psp:privileged`.
|
||||
|
||||
# What’s next
|
||||
See how Bill, the cluster admin, can assign to Alice the permissions to create custom resources in her tenant. [Create Custom Resources](./custom-resources.md).
|
||||
@@ -19,7 +19,7 @@ spec:
|
||||
scopes:
|
||||
- NotTerminating
|
||||
- hard:
|
||||
pods: "100"
|
||||
pods: "10"
|
||||
services: "50"
|
||||
- hard:
|
||||
requests.storage: 10Gi
|
||||
|
||||
@@ -16,4 +16,12 @@ In case of a data loss, the right thing to do is to restore the cluster with **V
|
||||
./velero-restore.sh --kubeconfing /path/to/your/kubeconfig restore
|
||||
```
|
||||
|
||||
Running this command, we are going to patch the tenant's namespaces manifests that are actually `ownerReferences`-less. Once the command has finished its run, you got the cluster back.
|
||||
Running this command, we are going to patch the tenant's namespace manifests, which are currently `ownerReferences`-less. Once the command has finished its run, you will get the cluster back.
|
||||
|
||||
Additionally, you can specify a selected range of tenants to be restored:
|
||||
|
||||
```bash
|
||||
./velero-restore.sh --tenant "gas oil" restore
|
||||
```
|
||||
|
||||
In this way, only the tenants **gas** and **oil** will be restored.
|
||||
@@ -48,8 +48,8 @@ ingress:
|
||||
paths: [ "/" ]
|
||||
EOF
|
||||
|
||||
$ helm install capsule-proxy capsule-proxy \
|
||||
--valuecustom-values.yaml \
|
||||
$ helm install capsule-proxy clastix/capsule-proxy \
|
||||
--values custom-values.yaml \
|
||||
-n capsule-system
|
||||
```
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ var _ = Describe("creating a Namespace with an additional Role Binding", func()
|
||||
Name: "additional-role-binding",
|
||||
},
|
||||
Spec: capsulev1beta1.TenantSpec{
|
||||
Owners: []capsulev1beta1.OwnerSpec{
|
||||
Owners: capsulev1beta1.OwnerListSpec{
|
||||
{
|
||||
Name: "dale",
|
||||
Kind: "User",
|
||||
|
||||
@@ -23,16 +23,18 @@ var _ = Describe("enforcing an allowed set of Service external IPs", func() {
|
||||
Name: "allowed-external-ip",
|
||||
},
|
||||
Spec: capsulev1beta1.TenantSpec{
|
||||
Owners: []capsulev1beta1.OwnerSpec{
|
||||
Owners: capsulev1beta1.OwnerListSpec{
|
||||
{
|
||||
Name: "google",
|
||||
Kind: "User",
|
||||
},
|
||||
},
|
||||
ExternalServiceIPs: &capsulev1beta1.ExternalServiceIPsSpec{
|
||||
Allowed: []capsulev1beta1.AllowedIP{
|
||||
"10.20.0.0/16",
|
||||
"192.168.1.2/32",
|
||||
ServiceOptions: &capsulev1beta1.ServiceOptions{
|
||||
ExternalServiceIPs: &capsulev1beta1.ExternalServiceIPsSpec{
|
||||
Allowed: []capsulev1beta1.AllowedIP{
|
||||
"10.20.0.0/16",
|
||||
"192.168.1.2/32",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -23,7 +23,7 @@ var _ = Describe("enforcing a Container Registry", func() {
|
||||
Name: "container-registry",
|
||||
},
|
||||
Spec: capsulev1beta1.TenantSpec{
|
||||
Owners: []capsulev1beta1.OwnerSpec{
|
||||
Owners: capsulev1beta1.OwnerListSpec{
|
||||
{
|
||||
Name: "matt",
|
||||
Kind: "User",
|
||||
|
||||
@@ -23,7 +23,7 @@ var _ = Describe("creating a Namespace as Tenant owner with custom --capsule-gro
|
||||
Name: "tenant-assigned-custom-group",
|
||||
},
|
||||
Spec: capsulev1beta1.TenantSpec{
|
||||
Owners: []capsulev1beta1.OwnerSpec{
|
||||
Owners: capsulev1beta1.OwnerListSpec{
|
||||
{
|
||||
Name: "alice",
|
||||
Kind: "User",
|
||||
|
||||
108
e2e/disable_externalname_test.go
Normal file
@@ -0,0 +1,108 @@
|
||||
//+build e2e
|
||||
|
||||
// Copyright 2020-2021 Clastix Labs
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/utils/pointer"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
|
||||
var _ = Describe("creating an ExternalName service when it is disabled for Tenant", func() {
|
||||
tnt := &capsulev1beta1.Tenant{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "disable-external-service",
|
||||
},
|
||||
Spec: capsulev1beta1.TenantSpec{
|
||||
Owners: capsulev1beta1.OwnerListSpec{
|
||||
{
|
||||
Name: "google",
|
||||
Kind: "User",
|
||||
},
|
||||
},
|
||||
ServiceOptions: &capsulev1beta1.ServiceOptions{
|
||||
AllowedServices: &capsulev1beta1.AllowedServices{
|
||||
ExternalName: pointer.BoolPtr(false),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
JustBeforeEach(func() {
|
||||
EventuallyCreation(func() error {
|
||||
tnt.ResourceVersion = ""
|
||||
return k8sClient.Create(context.TODO(), tnt)
|
||||
}).Should(Succeed())
|
||||
})
|
||||
|
||||
JustAfterEach(func() {
|
||||
Expect(k8sClient.Delete(context.TODO(), tnt)).Should(Succeed())
|
||||
})
|
||||
|
||||
It("should fail creating a service with ExternalService type", func() {
|
||||
ns := NewNamespace("disable-external-service")
|
||||
NamespaceCreation(ns, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
|
||||
|
||||
EventuallyCreation(func() error {
|
||||
svc := &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "cluster-ip",
|
||||
Namespace: ns.GetName(),
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
Type: corev1.ServiceTypeClusterIP,
|
||||
Ports: []corev1.ServicePort{
|
||||
{
|
||||
Port: 8888,
|
||||
TargetPort: intstr.IntOrString{
|
||||
Type: intstr.Int,
|
||||
IntVal: 8888,
|
||||
},
|
||||
Protocol: corev1.ProtocolTCP,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
cs := ownerClient(tnt.Spec.Owners[0])
|
||||
_, err := cs.CoreV1().Services(ns.Name).Create(context.Background(), svc, metav1.CreateOptions{})
|
||||
return err
|
||||
}).Should(Succeed())
|
||||
|
||||
EventuallyCreation(func() error {
|
||||
svc := &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "disable-external-service",
|
||||
Namespace: ns.GetName(),
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
Type: corev1.ServiceTypeExternalName,
|
||||
Ports: []corev1.ServicePort{
|
||||
{
|
||||
Port: 9999,
|
||||
TargetPort: intstr.IntOrString{
|
||||
Type: intstr.Int,
|
||||
IntVal: 9999,
|
||||
},
|
||||
Protocol: corev1.ProtocolTCP,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
cs := ownerClient(tnt.Spec.Owners[0])
|
||||
_, err := cs.CoreV1().Services(ns.Name).Create(context.Background(), svc, metav1.CreateOptions{})
|
||||
return err
|
||||
}).ShouldNot(Succeed())
|
||||
})
|
||||
})
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/utils/pointer"
|
||||
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
@@ -23,13 +24,17 @@ var _ = Describe("creating a nodePort service when it is disabled for Tenant", f
|
||||
Name: "disable-node-ports",
|
||||
},
|
||||
Spec: capsulev1beta1.TenantSpec{
|
||||
Owners: []capsulev1beta1.OwnerSpec{
|
||||
Owners: capsulev1beta1.OwnerListSpec{
|
||||
{
|
||||
Name: "google",
|
||||
Kind: "User",
|
||||
},
|
||||
},
|
||||
EnableNodePorts: false,
|
||||
ServiceOptions: &capsulev1beta1.ServiceOptions{
|
||||
AllowedServices: &capsulev1beta1.AllowedServices{
|
||||
NodePort: pointer.BoolPtr(false),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ var _ = Describe("creating a nodePort service when it is enabled for Tenant", fu
|
||||
Name: "enable-node-ports",
|
||||
},
|
||||
Spec: capsulev1beta1.TenantSpec{
|
||||
Owners: []capsulev1beta1.OwnerSpec{
|
||||
Owners: capsulev1beta1.OwnerListSpec{
|
||||
{
|
||||
Name: "google",
|
||||
Kind: "User",
|
||||
|
||||
@@ -23,7 +23,7 @@ var _ = Describe("creating a Namespace with Tenant name prefix enforcement", fun
|
||||
Name: "awesome",
|
||||
},
|
||||
Spec: capsulev1beta1.TenantSpec{
|
||||
Owners: []capsulev1beta1.OwnerSpec{
|
||||
Owners: capsulev1beta1.OwnerListSpec{
|
||||
{
|
||||
Name: "john",
|
||||
Kind: "User",
|
||||
@@ -36,7 +36,7 @@ var _ = Describe("creating a Namespace with Tenant name prefix enforcement", fun
|
||||
Name: "awesome-tenant",
|
||||
},
|
||||
Spec: capsulev1beta1.TenantSpec{
|
||||
Owners: []capsulev1beta1.OwnerSpec{
|
||||
Owners: capsulev1beta1.OwnerListSpec{
|
||||
{
|
||||
Name: "john",
|
||||
Kind: "User",
|
||||
|
||||
@@ -22,7 +22,7 @@ var _ = Describe("enforcing some defined ImagePullPolicy", func() {
|
||||
Name: "image-pull-policies",
|
||||
},
|
||||
Spec: capsulev1beta1.TenantSpec{
|
||||
Owners: []capsulev1beta1.OwnerSpec{
|
||||
Owners: capsulev1beta1.OwnerListSpec{
|
||||
{
|
||||
Name: "alex",
|
||||
Kind: "User",
|
||||
|
||||
@@ -22,7 +22,7 @@ var _ = Describe("enforcing a defined ImagePullPolicy", func() {
|
||||
Name: "image-pull-policy",
|
||||
},
|
||||
Spec: capsulev1beta1.TenantSpec{
|
||||
Owners: []capsulev1beta1.OwnerSpec{
|
||||
Owners: capsulev1beta1.OwnerListSpec{
|
||||
{
|
||||
Name: "axel",
|
||||
Kind: "User",
|
||||
|
||||
@@ -18,24 +18,26 @@ import (
|
||||
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
|
||||
)
|
||||
|
||||
var _ = Describe("when Tenant handles Ingress classes", func() {
|
||||
var _ = Describe("when Tenant handles Ingress classes with extensions/v1beta1", func() {
|
||||
tnt := &capsulev1beta1.Tenant{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "ingress-class",
|
||||
Name: "ingress-class-extensions-v1beta1",
|
||||
},
|
||||
Spec: capsulev1beta1.TenantSpec{
|
||||
Owners: []capsulev1beta1.OwnerSpec{
|
||||
Owners: capsulev1beta1.OwnerListSpec{
|
||||
{
|
||||
Name: "ingress",
|
||||
Kind: "User",
|
||||
},
|
||||
},
|
||||
IngressClasses: &capsulev1beta1.AllowedListSpec{
|
||||
Exact: []string{
|
||||
"nginx",
|
||||
"haproxy",
|
||||
IngressOptions: capsulev1beta1.IngressOptions{
|
||||
AllowedClasses: &capsulev1beta1.AllowedListSpec{
|
||||
Exact: []string{
|
||||
"nginx",
|
||||
"haproxy",
|
||||
},
|
||||
Regex: "^oil-.*$",
|
||||
},
|
||||
Regex: "^oil-.*$",
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -50,8 +52,14 @@ var _ = Describe("when Tenant handles Ingress classes", func() {
|
||||
Expect(k8sClient.Delete(context.TODO(), tnt)).Should(Succeed())
|
||||
})
|
||||
|
||||
It("should block a non allowed class", func() {
|
||||
ns := NewNamespace("ingress-class-disallowed")
|
||||
It("should block a non allowed class for extensions/v1beta1", func() {
|
||||
maj, min, v := GetKubernetesSemVer()
|
||||
|
||||
if maj == 1 && min >= 22 {
|
||||
Skip("Running test on Kubernetes " + v + ", extensions/v1beta1 has been deprecated")
|
||||
}
|
||||
|
||||
ns := NewNamespace("ingress-class-disallowed-extensions-v1beta1")
|
||||
cs := ownerClient(tnt.Spec.Owners[0])
|
||||
|
||||
NamespaceCreation(ns, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
|
||||
@@ -115,13 +123,19 @@ var _ = Describe("when Tenant handles Ingress classes", func() {
|
||||
})
|
||||
|
||||
It("should allow enabled class using the deprecated annotation", func() {
|
||||
ns := NewNamespace("ingress-class-allowed-annotation")
|
||||
maj, min, v := GetKubernetesSemVer()
|
||||
|
||||
if maj == 1 && min >= 22 {
|
||||
Skip("Running test on Kubernetes " + v + ", extensions/v1beta1 has been deprecated")
|
||||
}
|
||||
|
||||
ns := NewNamespace("ingress-class-allowed-annotation-extensions-v1beta1")
|
||||
cs := ownerClient(tnt.Spec.Owners[0])
|
||||
|
||||
NamespaceCreation(ns, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
|
||||
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
|
||||
|
||||
for _, c := range tnt.Spec.IngressClasses.Exact {
|
||||
for _, c := range tnt.Spec.IngressOptions.AllowedClasses.Exact {
|
||||
Eventually(func() (err error) {
|
||||
i := &extensionsv1beta1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -144,18 +158,22 @@ var _ = Describe("when Tenant handles Ingress classes", func() {
|
||||
})
|
||||
|
||||
It("should allow enabled class using the ingressClassName field", func() {
|
||||
ns := NewNamespace("ingress-class-allowed-annotation")
|
||||
cs := ownerClient(tnt.Spec.Owners[0])
|
||||
|
||||
maj, min, v := GetKubernetesSemVer()
|
||||
if maj == 1 && min < 18 {
|
||||
|
||||
switch {
|
||||
case maj == 1 && min < 18:
|
||||
Skip("Running test on Kubernetes " + v + ", doesn't provide .spec.ingressClassName")
|
||||
case maj == 1 && min >= 22:
|
||||
Skip("Running test on Kubernetes " + v + ", extensions/v1beta1 has been deprecated")
|
||||
}
|
||||
|
||||
ns := NewNamespace("ingress-class-allowed-annotation-extensions-v1beta1")
|
||||
cs := ownerClient(tnt.Spec.Owners[0])
|
||||
|
||||
NamespaceCreation(ns, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
|
||||
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
|
||||
|
||||
for _, c := range tnt.Spec.IngressClasses.Exact {
|
||||
for _, c := range tnt.Spec.IngressOptions.AllowedClasses.Exact {
|
||||
Eventually(func() (err error) {
|
||||
i := &extensionsv1beta1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@@ -176,7 +194,13 @@ var _ = Describe("when Tenant handles Ingress classes", func() {
|
||||
})
|
||||
|
||||
It("should allow enabled Ingress by regex using the deprecated annotation", func() {
|
||||
ns := NewNamespace("ingress-class-allowed-annotation")
|
||||
maj, min, v := GetKubernetesSemVer()
|
||||
|
||||
if maj == 1 && min >= 22 {
|
||||
Skip("Running test on Kubernetes " + v + ", extensions/v1beta1 has been deprecated")
|
||||
}
|
||||
|
||||
ns := NewNamespace("ingress-class-allowed-annotation-extensions-v1beta1")
|
||||
cs := ownerClient(tnt.Spec.Owners[0])
|
||||
ingressClass := "oil-ingress"
|
||||
|
||||
@@ -204,15 +228,19 @@ var _ = Describe("when Tenant handles Ingress classes", func() {
|
||||
})
|
||||
|
||||
It("should allow enabled Ingress by regex using the ingressClassName field", func() {
|
||||
ns := NewNamespace("ingress-class-allowed-annotation")
|
||||
cs := ownerClient(tnt.Spec.Owners[0])
|
||||
ingressClass := "oil-haproxy"
|
||||
|
||||
maj, min, v := GetKubernetesSemVer()
|
||||
if maj == 1 && min < 18 {
|
||||
|
||||
switch {
|
||||
case maj == 1 && min >= 22:
|
||||
Skip("Running test on Kubernetes " + v + ", extensions/v1beta1 has been deprecated")
|
||||
case maj == 1 && min < 18:
|
||||
Skip("Running test on Kubernetes " + v + ", doesn't provide .spec.ingressClassName")
|
||||
}
|
||||
|
||||
ns := NewNamespace("ingress-class-allowed-annotation-extensions-v1beta1")
|
||||
cs := ownerClient(tnt.Spec.Owners[0])
|
||||
ingressClass := "oil-haproxy"
|
||||
|
||||
NamespaceCreation(ns, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
|
||||
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
|
||||
|
||||
285
e2e/ingress_class_networking_test.go
Normal file
@@ -0,0 +1,285 @@
//+build e2e

// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package e2e

import (
	"context"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/pointer"

	capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

var _ = Describe("when Tenant handles Ingress classes with networking.k8s.io/v1", func() {
	tnt := &capsulev1beta1.Tenant{
		ObjectMeta: metav1.ObjectMeta{
			Name: "ingress-class-networking-v1",
		},
		Spec: capsulev1beta1.TenantSpec{
			Owners: []capsulev1beta1.OwnerSpec{
				{
					Name: "ingress",
					Kind: "User",
				},
			},
			IngressOptions: capsulev1beta1.IngressOptions{
				AllowedClasses: &capsulev1beta1.AllowedListSpec{
					Exact: []string{
						"nginx",
						"haproxy",
					},
					Regex: "^oil-.*$",
				},
			},
		},
	}

	JustBeforeEach(func() {
		EventuallyCreation(func() error {
			tnt.ResourceVersion = ""
			return k8sClient.Create(context.TODO(), tnt)
		}).Should(Succeed())
	})
	JustAfterEach(func() {
		Expect(k8sClient.Delete(context.TODO(), tnt)).Should(Succeed())
	})

	It("should block a non allowed class", func() {
		maj, min, v := GetKubernetesSemVer()

		if maj == 1 && min < 19 {
			Skip("Running test on Kubernetes " + v + ", doesn't provide networking.k8s.io/v1")
		}

		ns := NewNamespace("ingress-class-disallowed-networking-v1")
		cs := ownerClient(tnt.Spec.Owners[0])

		NamespaceCreation(ns, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
		TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))

		By("non-specifying at all", func() {
			Eventually(func() (err error) {
				i := &networkingv1.Ingress{
					ObjectMeta: metav1.ObjectMeta{
						Name: "denied-ingress",
					},
					Spec: networkingv1.IngressSpec{
						DefaultBackend: &networkingv1.IngressBackend{
							Service: &networkingv1.IngressServiceBackend{
								Name: "foo",
								Port: networkingv1.ServiceBackendPort{
									Number: 8080,
								},
							},
						},
					},
				}
				_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), i, metav1.CreateOptions{})
				return
			}, defaultTimeoutInterval, defaultPollInterval).ShouldNot(Succeed())
		})
		By("defining as deprecated annotation", func() {
			Eventually(func() (err error) {
				i := &networkingv1.Ingress{
					ObjectMeta: metav1.ObjectMeta{
						Name: "denied-ingress",
						Annotations: map[string]string{
							"kubernetes.io/ingress.class": "the-worst-ingress-available",
						},
					},
					Spec: networkingv1.IngressSpec{
						DefaultBackend: &networkingv1.IngressBackend{
							Service: &networkingv1.IngressServiceBackend{
								Name: "foo",
								Port: networkingv1.ServiceBackendPort{
									Number: 8080,
								},
							},
						},
					},
				}
				_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), i, metav1.CreateOptions{})
				return
			}, defaultTimeoutInterval, defaultPollInterval).ShouldNot(Succeed())
		})
		By("using the ingressClassName", func() {
			Eventually(func() (err error) {
				i := &networkingv1.Ingress{
					ObjectMeta: metav1.ObjectMeta{
						Name: "denied-ingress",
					},
					Spec: networkingv1.IngressSpec{
						IngressClassName: pointer.StringPtr("the-worst-ingress-available"),
						DefaultBackend: &networkingv1.IngressBackend{
							Service: &networkingv1.IngressServiceBackend{
								Name: "foo",
								Port: networkingv1.ServiceBackendPort{
									Number: 8080,
								},
							},
						},
					},
				}
				_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), i, metav1.CreateOptions{})
				return
			}, defaultTimeoutInterval, defaultPollInterval).ShouldNot(Succeed())
		})
	})

	It("should allow enabled class using the deprecated annotation for networking.k8s.io/v1", func() {
		maj, min, v := GetKubernetesSemVer()

		if maj == 1 && min < 19 {
			Skip("Running test on Kubernetes " + v + ", doesn't provide networking.k8s.io/v1")
		}

		ns := NewNamespace("ingress-class-allowed-annotation-networking-v1")
		cs := ownerClient(tnt.Spec.Owners[0])

		NamespaceCreation(ns, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
		TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))

		for _, c := range tnt.Spec.IngressOptions.AllowedClasses.Exact {
			Eventually(func() (err error) {
				i := &networkingv1.Ingress{
					ObjectMeta: metav1.ObjectMeta{
						Name: c,
						Annotations: map[string]string{
							"kubernetes.io/ingress.class": c,
						},
					},
					Spec: networkingv1.IngressSpec{
						DefaultBackend: &networkingv1.IngressBackend{
							Service: &networkingv1.IngressServiceBackend{
								Name: "foo",
								Port: networkingv1.ServiceBackendPort{
									Number: 8080,
								},
							},
						},
					},
				}
				_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), i, metav1.CreateOptions{})
				return
			}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
		}
	})

	It("should allow enabled class using the ingressClassName field", func() {
		maj, min, v := GetKubernetesSemVer()

		if maj == 1 && min < 19 {
			Skip("Running test on Kubernetes " + v + ", doesn't provide networking.k8s.io/v1")
		}

		ns := NewNamespace("ingress-class-allowed-annotation-networking-v1")
		cs := ownerClient(tnt.Spec.Owners[0])

		NamespaceCreation(ns, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
		TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))

		for _, c := range tnt.Spec.IngressOptions.AllowedClasses.Exact {
			Eventually(func() (err error) {
				i := &networkingv1.Ingress{
					ObjectMeta: metav1.ObjectMeta{
						Name: c,
					},
					Spec: networkingv1.IngressSpec{
						IngressClassName: &c,
						DefaultBackend: &networkingv1.IngressBackend{
							Service: &networkingv1.IngressServiceBackend{
								Name: "foo",
								Port: networkingv1.ServiceBackendPort{
									Number: 8080,
								},
							},
						},
					},
				}
				_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), i, metav1.CreateOptions{})
				return
			}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
		}
	})

	It("should allow enabled Ingress by regex using the deprecated annotation", func() {
		maj, min, v := GetKubernetesSemVer()

		if maj == 1 && min < 19 {
			Skip("Running test on Kubernetes " + v + ", doesn't provide networking.k8s.io/v1")
		}

		ns := NewNamespace("ingress-class-allowed-annotation-networking-v1")
		cs := ownerClient(tnt.Spec.Owners[0])
		ingressClass := "oil-ingress"

		NamespaceCreation(ns, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
		TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))

		Eventually(func() (err error) {
			i := &networkingv1.Ingress{
				ObjectMeta: metav1.ObjectMeta{
					Name: ingressClass,
					Annotations: map[string]string{
						"kubernetes.io/ingress.class": ingressClass,
					},
				},
				Spec: networkingv1.IngressSpec{
					DefaultBackend: &networkingv1.IngressBackend{
						Service: &networkingv1.IngressServiceBackend{
							Name: "foo",
							Port: networkingv1.ServiceBackendPort{
								Number: 8080,
							},
						},
					},
				},
			}
			_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), i, metav1.CreateOptions{})
			return
		}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
	})

	It("should allow enabled Ingress by regex using the ingressClassName field", func() {
		maj, min, v := GetKubernetesSemVer()

		if maj == 1 && min < 19 {
			Skip("Running test on Kubernetes " + v + ", doesn't provide networking.k8s.io/v1")
		}

		ns := NewNamespace("ingress-class-allowed-annotation-networking-v1")
		cs := ownerClient(tnt.Spec.Owners[0])
		ingressClass := "oil-haproxy"

		NamespaceCreation(ns, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
		TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))

		Eventually(func() (err error) {
			i := &networkingv1.Ingress{
				ObjectMeta: metav1.ObjectMeta{
					Name: ingressClass,
				},
				Spec: networkingv1.IngressSpec{
					IngressClassName: &ingressClass,
					DefaultBackend: &networkingv1.IngressBackend{
						Service: &networkingv1.IngressServiceBackend{
							Name: "foo",
							Port: networkingv1.ServiceBackendPort{
								Number: 8080,
							},
						},
					},
				},
			}
			_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), i, metav1.CreateOptions{})
			return
		}, 600, defaultPollInterval).Should(Succeed())
	})
})
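Taken together, the blocking and allowing cases above pin down the AllowedClasses semantics: an Ingress class passes when it is listed in Exact or matches Regex. The toy predicate below captures that rule; it is a sketch of the expected webhook behaviour, not Capsule's code.

package main

import (
	"fmt"
	"regexp"
)

// classAllowed mirrors the behaviour the tests above expect from the
// validating webhook: an Ingress class is accepted when it appears in the
// Exact list or matches the Regex pattern. Illustrative only.
func classAllowed(class string, exact []string, regex string) bool {
	for _, e := range exact {
		if e == class {
			return true
		}
	}
	if regex != "" {
		if matched, err := regexp.MatchString(regex, class); err == nil && matched {
			return true
		}
	}
	return false
}

func main() {
	exact, regex := []string{"nginx", "haproxy"}, "^oil-.*$"
	fmt.Println(classAllowed("oil-haproxy", exact, regex))                 // true, via regex
	fmt.Println(classAllowed("the-worst-ingress-available", exact, regex)) // false
}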
@@ -1,172 +0,0 @@
//+build e2e

// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package e2e

import (
	"context"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	capsulev1alpha1 "github.com/clastix/capsule/api/v1alpha1"

	capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

var _ = Describe("when handling Ingress hostnames collision", func() {
	tnt := &capsulev1beta1.Tenant{
		ObjectMeta: metav1.ObjectMeta{
			Name: "ingress-hostnames-allowed-collision",
		},
		Spec: capsulev1beta1.TenantSpec{
			Owners: []capsulev1beta1.OwnerSpec{
				{
					Name: "ingress-allowed",
					Kind: "User",
				},
			},
		},
	}

	// scaffold a basic networking.k8s.io Ingress with name and host
	networkingIngress := func(name, hostname string) *networkingv1.Ingress {
		return &networkingv1.Ingress{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
			},
			Spec: networkingv1.IngressSpec{
				Rules: []networkingv1.IngressRule{
					{
						Host: hostname,
					},
				},
			},
		}
	}
	// scaffold a basic extensions Ingress with name and host
	extensionsIngress := func(name, hostname string) *extensionsv1beta1.Ingress {
		return &extensionsv1beta1.Ingress{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
			},
			Spec: extensionsv1beta1.IngressSpec{
				Rules: []extensionsv1beta1.IngressRule{
					{
						Host: hostname,
					},
				},
			},
		}

	}

	JustBeforeEach(func() {
		EventuallyCreation(func() error {
			tnt.ResourceVersion = ""
			return k8sClient.Create(context.TODO(), tnt)
		}).Should(Succeed())

		ModifyCapsuleConfigurationOpts(func(configuration *capsulev1alpha1.CapsuleConfiguration) {
			configuration.Spec.AllowIngressHostnameCollision = true
		})
	})

	JustAfterEach(func() {
		Expect(k8sClient.Delete(context.TODO(), tnt)).Should(Succeed())

		ModifyCapsuleConfigurationOpts(func(configuration *capsulev1alpha1.CapsuleConfiguration) {
			configuration.Spec.AllowIngressHostnameCollision = false
		})
	})

	It("should not allow creating several Ingress with same hostname", func() {
		ModifyCapsuleConfigurationOpts(func(configuration *capsulev1alpha1.CapsuleConfiguration) {
			configuration.Spec.AllowIngressHostnameCollision = false
		})

		maj, min, _ := GetKubernetesSemVer()

		ns := NewNamespace("denied-collision")
		cs := ownerClient(tnt.Spec.Owners[0])

		NamespaceCreation(ns, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
		TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))

		if maj == 1 && min > 18 {
			By("testing networking.k8s.io", func() {
				EventuallyCreation(func() (err error) {
					obj := networkingIngress("networking-1", "kubernetes.io")
					_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
					return
				}).Should(Succeed())
				EventuallyCreation(func() (err error) {
					obj := networkingIngress("networking-2", "kubernetes.io")
					_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
					return
				}).ShouldNot(Succeed())
			})
		}

		if maj == 1 && min < 22 {
			By("testing extensions", func() {
				EventuallyCreation(func() (err error) {
					obj := extensionsIngress("extensions-1", "cncf.io")
					_, err = cs.ExtensionsV1beta1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
					return
				}).Should(Succeed())
				EventuallyCreation(func() (err error) {
					obj := extensionsIngress("extensions-2", "cncf.io")
					_, err = cs.ExtensionsV1beta1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
					return
				}).ShouldNot(Succeed())
			})
		}

	})

	It("should allow creating several Ingress with same hostname", func() {
		maj, min, _ := GetKubernetesSemVer()

		ns := NewNamespace("allowed-collision")
		cs := ownerClient(tnt.Spec.Owners[0])

		NamespaceCreation(ns, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
		TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))

		if maj == 1 && min > 18 {
			By("testing networking.k8s.io", func() {
				EventuallyCreation(func() (err error) {
					obj := networkingIngress("networking-1", "kubernetes.io")
					_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
					return
				}).Should(Succeed())
				EventuallyCreation(func() (err error) {
					obj := networkingIngress("networking-2", "kubernetes.io")
					_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
					return
				}).Should(Succeed())
			})
		}

		if maj == 1 && min < 22 {
			By("testing extensions", func() {
				EventuallyCreation(func() (err error) {
					obj := extensionsIngress("extensions-1", "cncf.io")
					_, err = cs.ExtensionsV1beta1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
					return
				}).Should(Succeed())
				EventuallyCreation(func() (err error) {
					obj := extensionsIngress("extensions-2", "cncf.io")
					_, err = cs.ExtensionsV1beta1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
					return
				}).Should(Succeed())
			})
		}
	})
})
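The deleted suite above drove collision checks through the cluster-wide AllowIngressHostnameCollision boolean; the three new suites below switch to a per-tenant HostnameCollisionScope. The sketch that follows shows how a scope-aware webhook could derive its deduplication key; the key layout is an assumption, only the scope constants come from the code below.

package e2e

import capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"

// collisionKey is a hypothetical normalization of an Ingress rule into a
// deduplication key for each scope exercised by the suites below; the key
// format is made up for illustration.
func collisionKey(scope capsulev1beta1.HostnameCollisionScope, namespace, host, path string) (key string, check bool) {
	switch scope {
	case capsulev1beta1.HostnameCollisionScopeCluster:
		// one bucket per host+path across the whole cluster
		return host + path, true
	case capsulev1beta1.HostnameCollisionScopeNamespace:
		// duplicates collide only when they share a Namespace
		return namespace + "/" + host + path, true
	default:
		// HostnameCollisionScopeDisabled: skip the check entirely
		return "", false
	}
}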
221
e2e/ingress_hostnames_collision_cluster_scope_test.go
Normal file
@@ -0,0 +1,221 @@
//+build e2e

// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package e2e

import (
	"context"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

var _ = Describe("when handling Cluster scoped Ingress hostnames collision", func() {
	tnt1 := &capsulev1beta1.Tenant{
		ObjectMeta: metav1.ObjectMeta{
			Name: "hostnames-collision-cluster-one",
		},
		Spec: capsulev1beta1.TenantSpec{
			Owners: capsulev1beta1.OwnerListSpec{
				{
					Name: "ingress-tenant-one",
					Kind: "User",
				},
			},
			IngressOptions: capsulev1beta1.IngressOptions{
				HostnameCollisionScope: capsulev1beta1.HostnameCollisionScopeCluster,
			},
		},
	}
	tnt2 := &capsulev1beta1.Tenant{
		ObjectMeta: metav1.ObjectMeta{
			Name: "hostnames-collision-cluster-two",
		},
		Spec: capsulev1beta1.TenantSpec{
			Owners: capsulev1beta1.OwnerListSpec{
				{
					Name: "ingress-tenant-two",
					Kind: "User",
				},
			},
			IngressOptions: capsulev1beta1.IngressOptions{
				HostnameCollisionScope: capsulev1beta1.HostnameCollisionScopeCluster,
			},
		},
	}
	// scaffold a basic networking.k8s.io Ingress with name and host
	networkingIngress := func(name, hostname, path string) *networkingv1.Ingress {
		return &networkingv1.Ingress{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
			},
			Spec: networkingv1.IngressSpec{
				Rules: []networkingv1.IngressRule{
					{
						Host: hostname,
						IngressRuleValue: networkingv1.IngressRuleValue{
							HTTP: &networkingv1.HTTPIngressRuleValue{
								Paths: []networkingv1.HTTPIngressPath{
									{
										Path: path,
										PathType: func(v networkingv1.PathType) *networkingv1.PathType {
											return &v
										}(networkingv1.PathTypeExact),
										Backend: networkingv1.IngressBackend{
											Service: &networkingv1.IngressServiceBackend{
												Name: "example",
												Port: networkingv1.ServiceBackendPort{
													Number: 8080,
												},
											},
										},
									},
								},
							},
						},
					},
				},
			},
		}
	}
	// scaffold a basic extensions Ingress with name and host
	extensionsIngress := func(name, hostname, path string) *extensionsv1beta1.Ingress {
		return &extensionsv1beta1.Ingress{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
			},
			Spec: extensionsv1beta1.IngressSpec{
				Rules: []extensionsv1beta1.IngressRule{
					{
						Host: hostname,
						IngressRuleValue: extensionsv1beta1.IngressRuleValue{
							HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
								Paths: []extensionsv1beta1.HTTPIngressPath{
									{
										Path: path,
										PathType: func(v extensionsv1beta1.PathType) *extensionsv1beta1.PathType {
											return &v
										}(extensionsv1beta1.PathTypeExact),
										Backend: extensionsv1beta1.IngressBackend{
											ServiceName: "example",
											ServicePort: intstr.FromInt(8080),
										},
									},
								},
							},
						},
					},
				},
			},
		}

	}

	JustBeforeEach(func() {
		EventuallyCreation(func() error {
			tnt1.ResourceVersion = ""

			return k8sClient.Create(context.TODO(), tnt1)
		}).Should(Succeed())

		EventuallyCreation(func() error {
			tnt2.ResourceVersion = ""

			return k8sClient.Create(context.TODO(), tnt2)
		}).Should(Succeed())
	})

	JustAfterEach(func() {
		Expect(k8sClient.Delete(context.TODO(), tnt1)).Should(Succeed())

		Expect(k8sClient.Delete(context.TODO(), tnt2)).Should(Succeed())
	})

	It("should ensure Cluster scope for Ingress hostname and path collision", func() {
		maj, min, _ := GetKubernetesSemVer()

		ns1 := NewNamespace("tenant-one-ns")

		cs1 := ownerClient(tnt1.Spec.Owners[0])

		NamespaceCreation(ns1, tnt1.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())

		TenantNamespaceList(tnt1, defaultTimeoutInterval).Should(ContainElement(ns1.GetName()))

		ns2 := NewNamespace("tenant-two-ns")

		cs2 := ownerClient(tnt2.Spec.Owners[0])

		NamespaceCreation(ns2, tnt2.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())

		TenantNamespaceList(tnt2, defaultTimeoutInterval).Should(ContainElement(ns2.GetName()))

		if maj == 1 && min > 18 {
			By("testing networking.k8s.io", func() {
				EventuallyCreation(func() (err error) {
					obj := networkingIngress("networking-1", "kubernetes.io", "/path")

					_, err = cs1.NetworkingV1().Ingresses(ns1.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())
				// Creating a second Ingress with the same hostname but a different path
				// should not trigger a collision, even from a Namespace owned by another Tenant...
				EventuallyCreation(func() (err error) {
					obj := networkingIngress("networking-2", "kubernetes.io", "/docs")

					_, err = cs2.NetworkingV1().Ingresses(ns2.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())
				// ...but it does if both hostname and path collide with the first Ingress,
				// although in a different Namespace
				EventuallyCreation(func() (err error) {
					obj := networkingIngress("networking-3", "kubernetes.io", "/path")

					_, err = cs2.NetworkingV1().Ingresses(ns2.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).ShouldNot(Succeed())
			})
		}

		if maj == 1 && min < 22 {
			By("testing extensions", func() {
				EventuallyCreation(func() (err error) {
					obj := extensionsIngress("extensions-1", "cncf.io", "/foo")

					_, err = cs1.ExtensionsV1beta1().Ingresses(ns1.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())
				// Creating a second Ingress with the same hostname but a different path
				// should not trigger a collision, even from a Namespace owned by another Tenant...
				EventuallyCreation(func() (err error) {
					obj := extensionsIngress("extensions-2", "cncf.io", "/bar")

					_, err = cs2.ExtensionsV1beta1().Ingresses(ns2.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())
				// ...but it does if both hostname and path collide with the first Ingress,
				// although in a different Namespace
				EventuallyCreation(func() (err error) {
					obj := extensionsIngress("extensions-3", "cncf.io", "/foo")

					_, err = cs2.ExtensionsV1beta1().Ingresses(ns2.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).ShouldNot(Succeed())
			})
		}
	})
})
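With Cluster scope, the duplicate check has to look past the tenant's own namespaces, which is why the colliding create above comes from a second tenant. A brute-force way to phrase that check with plain client-go, assuming list access to every Ingress; Capsule's webhook may well use a cached index instead.

package e2e

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// clusterCollides reports whether any existing Ingress in any Namespace
// already claims the given hostname and path pair. A brute-force sketch of
// the Cluster scope semantics verified above; not Capsule's implementation.
func clusterCollides(ctx context.Context, c kubernetes.Interface, host, path string) (bool, error) {
	list, err := c.NetworkingV1().Ingresses(metav1.NamespaceAll).List(ctx, metav1.ListOptions{})
	if err != nil {
		return false, err
	}
	for _, ing := range list.Items {
		for _, rule := range ing.Spec.Rules {
			if rule.Host != host || rule.HTTP == nil {
				continue
			}
			for _, p := range rule.HTTP.Paths {
				if p.Path == path {
					return true, nil
				}
			}
		}
	}
	return false, nil
}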
204
e2e/ingress_hostnames_collision_disabled_test.go
Normal file
@@ -0,0 +1,204 @@
//+build e2e

// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package e2e

import (
	"context"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

var _ = Describe("when disabling Ingress hostnames collision", func() {
	tnt := &capsulev1beta1.Tenant{
		ObjectMeta: metav1.ObjectMeta{
			Name: "hostnames-collision-disabled",
		},
		Spec: capsulev1beta1.TenantSpec{
			Owners: capsulev1beta1.OwnerListSpec{
				{
					Name: "ingress-disabled",
					Kind: "User",
				},
			},
			IngressOptions: capsulev1beta1.IngressOptions{
				HostnameCollisionScope: capsulev1beta1.HostnameCollisionScopeDisabled,
			},
		},
	}
	// scaffold a basic networking.k8s.io Ingress with name and host
	networkingIngress := func(name, hostname, path string) *networkingv1.Ingress {
		return &networkingv1.Ingress{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
			},
			Spec: networkingv1.IngressSpec{
				Rules: []networkingv1.IngressRule{
					{
						Host: hostname,
						IngressRuleValue: networkingv1.IngressRuleValue{
							HTTP: &networkingv1.HTTPIngressRuleValue{
								Paths: []networkingv1.HTTPIngressPath{
									{
										Path: path,
										PathType: func(v networkingv1.PathType) *networkingv1.PathType {
											return &v
										}(networkingv1.PathTypeExact),
										Backend: networkingv1.IngressBackend{
											Service: &networkingv1.IngressServiceBackend{
												Name: "example",
												Port: networkingv1.ServiceBackendPort{
													Number: 8080,
												},
											},
										},
									},
								},
							},
						},
					},
				},
			},
		}
	}
	// scaffold a basic extensions Ingress with name and host
	extensionsIngress := func(name, hostname, path string) *extensionsv1beta1.Ingress {
		return &extensionsv1beta1.Ingress{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
			},
			Spec: extensionsv1beta1.IngressSpec{
				Rules: []extensionsv1beta1.IngressRule{
					{
						Host: hostname,
						IngressRuleValue: extensionsv1beta1.IngressRuleValue{
							HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
								Paths: []extensionsv1beta1.HTTPIngressPath{
									{
										Path: path,
										PathType: func(v extensionsv1beta1.PathType) *extensionsv1beta1.PathType {
											return &v
										}(extensionsv1beta1.PathTypeExact),
										Backend: extensionsv1beta1.IngressBackend{
											ServiceName: "example",
											ServicePort: intstr.FromInt(8080),
										},
									},
								},
							},
						},
					},
				},
			},
		}

	}

	JustBeforeEach(func() {
		EventuallyCreation(func() error {
			tnt.ResourceVersion = ""

			return k8sClient.Create(context.TODO(), tnt)
		}).Should(Succeed())
	})

	JustAfterEach(func() {
		Expect(k8sClient.Delete(context.TODO(), tnt)).Should(Succeed())
	})

	It("should not check any kind of collision", func() {
		maj, min, _ := GetKubernetesSemVer()

		ns1 := NewNamespace("namespace-collision-one")

		ns2 := NewNamespace("namespace-collision-two")

		cs := ownerClient(tnt.Spec.Owners[0])

		NamespaceCreation(ns1, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
		NamespaceCreation(ns2, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
		TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns1.GetName()))
		TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns2.GetName()))

		if maj == 1 && min > 18 {
			By("testing networking.k8s.io", func() {
				EventuallyCreation(func() (err error) {
					obj := networkingIngress("networking-1", "kubernetes.io", "/path")

					_, err = cs.NetworkingV1().Ingresses(ns1.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())

				EventuallyCreation(func() (err error) {
					obj := networkingIngress("networking-2", "kubernetes.io", "/path")

					_, err = cs.NetworkingV1().Ingresses(ns1.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())

				EventuallyCreation(func() (err error) {
					obj := networkingIngress("networking-3", "kubernetes.io", "/path")

					_, err = cs.NetworkingV1().Ingresses(ns2.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())

				EventuallyCreation(func() (err error) {
					obj := networkingIngress("networking-4", "kubernetes.io", "/path")

					_, err = cs.NetworkingV1().Ingresses(ns2.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())
			})
		}

		if maj == 1 && min < 22 {
			By("testing extensions", func() {
				EventuallyCreation(func() (err error) {
					obj := extensionsIngress("extensions-1", "cncf.io", "/docs")

					_, err = cs.ExtensionsV1beta1().Ingresses(ns1.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())

				EventuallyCreation(func() (err error) {
					obj := extensionsIngress("extensions-2", "cncf.io", "/docs")

					_, err = cs.ExtensionsV1beta1().Ingresses(ns2.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())

				EventuallyCreation(func() (err error) {
					obj := extensionsIngress("extensions-3", "cncf.io", "/docs")

					_, err = cs.ExtensionsV1beta1().Ingresses(ns1.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())

				EventuallyCreation(func() (err error) {
					obj := extensionsIngress("extensions-4", "cncf.io", "/docs")

					_, err = cs.ExtensionsV1beta1().Ingresses(ns2.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())
			})
		}
	})
})
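Both scaffolding helpers obtain a *PathType through an immediately invoked closure. A named helper with the same effect, shown only to unpack the idiom:

package e2e

import networkingv1 "k8s.io/api/networking/v1"

// pathTypePtr returns a pointer to the given PathType value; equivalent to
// the inline func(v networkingv1.PathType) *networkingv1.PathType closures
// used by the scaffolding helpers in these suites.
func pathTypePtr(v networkingv1.PathType) *networkingv1.PathType {
	return &v
}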
206
e2e/ingress_hostnames_collision_namespace_scope_test.go
Normal file
@@ -0,0 +1,206 @@
//+build e2e

// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0

package e2e

import (
	"context"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

var _ = Describe("when handling Namespace scoped Ingress hostnames collision", func() {
	tnt := &capsulev1beta1.Tenant{
		ObjectMeta: metav1.ObjectMeta{
			Name: "hostnames-collision-namespace",
		},
		Spec: capsulev1beta1.TenantSpec{
			Owners: capsulev1beta1.OwnerListSpec{
				{
					Name: "ingress-namespace",
					Kind: "User",
				},
			},
			IngressOptions: capsulev1beta1.IngressOptions{
				HostnameCollisionScope: capsulev1beta1.HostnameCollisionScopeNamespace,
			},
		},
	}
	// scaffold a basic networking.k8s.io Ingress with name and host
	networkingIngress := func(name, hostname, path string) *networkingv1.Ingress {
		return &networkingv1.Ingress{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
			},
			Spec: networkingv1.IngressSpec{
				Rules: []networkingv1.IngressRule{
					{
						Host: hostname,
						IngressRuleValue: networkingv1.IngressRuleValue{
							HTTP: &networkingv1.HTTPIngressRuleValue{
								Paths: []networkingv1.HTTPIngressPath{
									{
										Path: path,
										PathType: func(v networkingv1.PathType) *networkingv1.PathType {
											return &v
										}(networkingv1.PathTypeExact),
										Backend: networkingv1.IngressBackend{
											Service: &networkingv1.IngressServiceBackend{
												Name: "example",
												Port: networkingv1.ServiceBackendPort{
													Number: 8080,
												},
											},
										},
									},
								},
							},
						},
					},
				},
			},
		}
	}
	// scaffold a basic extensions Ingress with name and host
	extensionsIngress := func(name, hostname, path string) *extensionsv1beta1.Ingress {
		return &extensionsv1beta1.Ingress{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
			},
			Spec: extensionsv1beta1.IngressSpec{
				Rules: []extensionsv1beta1.IngressRule{
					{
						Host: hostname,
						IngressRuleValue: extensionsv1beta1.IngressRuleValue{
							HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
								Paths: []extensionsv1beta1.HTTPIngressPath{
									{
										Path: path,
										PathType: func(v extensionsv1beta1.PathType) *extensionsv1beta1.PathType {
											return &v
										}(extensionsv1beta1.PathTypeExact),
										Backend: extensionsv1beta1.IngressBackend{
											ServiceName: "example",
											ServicePort: intstr.FromInt(8080),
										},
									},
								},
							},
						},
					},
				},
			},
		}

	}

	JustBeforeEach(func() {
		EventuallyCreation(func() error {
			tnt.ResourceVersion = ""

			return k8sClient.Create(context.TODO(), tnt)
		}).Should(Succeed())
	})

	JustAfterEach(func() {
		Expect(k8sClient.Delete(context.TODO(), tnt)).Should(Succeed())
	})

	It("should ensure Namespace scope for Ingress hostname and path collision", func() {
		maj, min, _ := GetKubernetesSemVer()

		ns1 := NewNamespace("namespace-collision-one")

		ns2 := NewNamespace("namespace-collision-two")

		cs := ownerClient(tnt.Spec.Owners[0])

		NamespaceCreation(ns1, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
		NamespaceCreation(ns2, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
		TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns1.GetName()))
		TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns2.GetName()))

		if maj == 1 && min > 18 {
			By("testing networking.k8s.io", func() {
				EventuallyCreation(func() (err error) {
					obj := networkingIngress("networking-1", "kubernetes.io", "/path")

					_, err = cs.NetworkingV1().Ingresses(ns1.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())
				// The same hostname and path pair can be created in a different Namespace,
				// even though it belongs to the same Tenant...
				EventuallyCreation(func() (err error) {
					obj := networkingIngress("networking-2", "kubernetes.io", "/path")

					_, err = cs.NetworkingV1().Ingresses(ns2.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())
				// ...but a collision occurs if the same pair is created in the same Namespace
				EventuallyCreation(func() (err error) {
					obj := networkingIngress("networking-3", "kubernetes.io", "/path")

					_, err = cs.NetworkingV1().Ingresses(ns1.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).ShouldNot(Succeed())

				EventuallyCreation(func() (err error) {
					obj := networkingIngress("networking-4", "kubernetes.io", "/path")

					_, err = cs.NetworkingV1().Ingresses(ns2.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).ShouldNot(Succeed())
			})
		}

		if maj == 1 && min < 22 {
			By("testing extensions", func() {
				EventuallyCreation(func() (err error) {
					obj := extensionsIngress("extensions-1", "cncf.io", "/docs")

					_, err = cs.ExtensionsV1beta1().Ingresses(ns1.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())
				// The same hostname and path pair can be created in a different Namespace,
				// even though it belongs to the same Tenant...
				EventuallyCreation(func() (err error) {
					obj := extensionsIngress("extensions-2", "cncf.io", "/docs")

					_, err = cs.ExtensionsV1beta1().Ingresses(ns2.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).Should(Succeed())
				// ...but a collision occurs if the same pair is created in the same Namespace
				EventuallyCreation(func() (err error) {
					obj := extensionsIngress("extensions-3", "cncf.io", "/docs")

					_, err = cs.ExtensionsV1beta1().Ingresses(ns1.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).ShouldNot(Succeed())

				EventuallyCreation(func() (err error) {
					obj := extensionsIngress("extensions-4", "cncf.io", "/docs")

					_, err = cs.ExtensionsV1beta1().Ingresses(ns2.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})

					return
				}).ShouldNot(Succeed())
			})
		}
	})
})
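All of these suites lean on the shared EventuallyCreation wrapper. One plausible shape for it, assuming it simply retries the create until the webhook chain settles; a guess at the utility, not its actual source.

package e2e

import . "github.com/onsi/gomega"

// EventuallyCreation, as guessed here, wraps Gomega's Eventually with the
// suite-wide timeout and poll interval used throughout the tests.
func EventuallyCreation(f func() error) AsyncAssertion {
	return Eventually(f, defaultTimeoutInterval, defaultPollInterval)
}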
Some files were not shown because too many files have changed in this diff.