Compare commits

...

35 Commits

Author SHA1 Message Date
Dario Tranchitella
37ec9911d9 chore: non embedding certs for kubeconfig file generation (#238) 2021-03-17 17:28:57 +01:00
Ludovico Russo
36124d2aba build(helm): remove options allow-ingress-hostname-collision and allow-tenant-ingress-hostnames-collision (#233)
These are going to be implemented once 0.0.5 is out with new flags.

Co-authored-by: Ludovico Russo <ludovico@ludusrusso.space>
2021-03-17 11:44:57 +01:00
Dario Tranchitella
5ecabaad3e refactor: ignoring requests from kube-system ServiceAccount resources 2021-03-17 11:43:11 +01:00
Valentino Uberti
56adfe6a35 feat: user script for Openshift (#230)
Tested by @ValentinoUberti on OCP 4.7.1
2021-03-10 15:11:15 +01:00
Dario Tranchitella
4119a69e02 fix: hostname collision between different Tenant namespaces 2021-03-06 20:50:55 +01:00
Dario Tranchitella
51de469551 bug: syncing Namespace annotations in a single place 2021-03-06 17:41:18 +01:00
Dario Tranchitella
87a360bfaf build(helm): support for --allow-tenant-ingress-hostnames-collision flag 2021-03-06 16:58:44 +01:00
Dario Tranchitella
bdce4a7b4f doc: documenting --allow-tenant-ingress-hostnames-collision new CLI flag 2021-03-06 16:58:44 +01:00
Dario Tranchitella
0dedd48789 test: new flag --allow-tenant-ingress-hostnames-collision 2021-03-06 16:58:44 +01:00
Dario Tranchitella
dfb7a5e227 feat: allowing Tenants with collided Ingress hostnames
A new flag (`--allow-tenant-ingress-hostnames-collision`) is added,
defaulted to false: when toggled, Capsule will not check if each
declared hostname in `.spec.IngressHostnames.allowed` is already in use
on any other Tenant.
2021-03-06 16:58:44 +01:00
Dario Tranchitella
d78bcd8b00 test(e2e): using default timeout and interval periods 2021-03-06 15:57:25 +01:00
Dario Tranchitella
0cad87e1ed test(e2e): avoiding reaping of unhealthy nodes blocking CI 2021-03-06 15:57:25 +01:00
Dario Tranchitella
74b0594cf4 feat(helm): customizable liveness and readiness probes 2021-03-06 15:57:25 +01:00
Dario Tranchitella
7fef4e5237 bug: type-switching on Ingress webhook for hostname collision 2021-03-06 15:06:18 +01:00
Dario Tranchitella
4a7c522eb5 bug: disabling ingresses.networking.k8s.io indexers on k8s < 1.19 2021-03-06 15:06:18 +01:00
Dario Tranchitella
8319bd3a85 build(helm): support for allow Ingress hostname collision 2021-03-05 22:50:35 +01:00
Dario Tranchitella
5d3770ae8d doc: documenting --allow-ingress-hostname-collision CLI flag 2021-03-05 22:50:35 +01:00
Dario Tranchitella
3fa78ea3df test: testing Ingress hostname collision 2021-03-05 22:50:35 +01:00
Dario Tranchitella
4fbede0989 feat: Ingress hostnames collision check
Disabled by default to avoid breaking changes for upcoming release,
although minor will be enabled by default.

Using the new `--allow-ingress-hostname-collision` flag Capsule can
ignore the Ingress hostnames collision allowing the Cluster
Administrator to put in place a non-opinionated hostnames allocation.
2021-03-05 22:50:35 +01:00
Davide Imola
d7b19a4930 build: using Docker build args for build metadata (#217) 2021-03-05 21:02:41 +01:00
Dario Tranchitella
452bceff34 fix: additional metadata must be controlled just from Tenant manifest (#211) 2021-03-04 10:02:14 +01:00
Erin Corson
2ea36db5d6 fix(typo): fixing typo in several webhook error messages (#212)
Co-authored-by: Erin Corson <ecorson@vmware.com>
2021-03-04 08:24:11 +01:00
Davide Imola
737b6ce65a Fix link to script (#210) 2021-03-01 22:50:24 +01:00
Unai Arríen
666faeb72a build(helm): making customizable components Docker images (#209) 2021-03-01 17:26:43 +01:00
Don High
4f34483dee Documentation Spelling Mistakes #197 (#203)
* Update README.md

Proof Read the README.md

* Update index.md

Proof Read index.md

* Update overview.md

Proof Read overview.md

* Update onboarding.md

Proof Read onboarding.md

* Update create-namespaces.md

Proof Read create-namespaces.md

* Update permissions.md

Proof Read permissons.md

* Update resources-quota-limits.md

Proof Read resources-quota-limits.md

* Update nodes-pool.md

Proof Read nodes-pool.md

* Update ingress-classes.md

Proof Read ingress-classes.md

* Update ingress-hostnames.md

Proof Read ingress-hostnames.md

* Update storage-classes.md

Proof Read storage-classes.md

* Update images-registries.md

Proof Read images-registries.md

* Update custom-resources.md

Proof Read custom-resources.md

* Update multiple-tenants.md

Proof Read multiple-tenants.md

* Update README.md

Updated the Suggested text

* Update README.md

Made the correction

* Update docs/operator/use-cases/images-registries.md

Co-authored-by: Don High <donghigh@yahoo.com>

Co-authored-by: Dario Tranchitella <dario@tranchitella.eu>
2021-02-19 11:40:20 +01:00
Erin Corson
e3b927f112 some typos and whatnot (#201)
Co-authored-by: Erin Corson <ecorson@vmware.com>
2021-02-16 22:18:02 +01:00
Dario Tranchitella
d9220f1e15 build(helm): avoiding deletion of Capsule secrets on Helm upgrade (#194) 2021-02-08 17:23:44 +01:00
Dario Tranchitella
f03e36e774 test: creating namespace and forcing upload of last built image (#195) 2021-02-08 17:19:23 +01:00
Brian Fox
7c30390206 docs: fix type in README.md (#198) 2021-02-03 16:00:18 +01:00
Dario Tranchitella
16906db309 Validating Tenant also on UPDATE (#191) 2021-01-21 07:11:59 +01:00
Adriano Pezzuto
d25ed7f2df Helm Chart icon fix (#192) 2021-01-16 14:01:13 +01:00
Dario Tranchitella
51f5bec5a6 Fixing the IngressClass return logic breaking Hostnames check (#185) 2021-01-15 09:45:09 +01:00
Dario Tranchitella
d3f3f93a24 CRD schema do not must preserving unknown fields (#188) 2021-01-15 09:44:04 +01:00
Dario Tranchitella
24bd363ee0 Updating v0.0.4 also for Kustomization installation (#186) 2021-01-14 19:22:14 +01:00
Dario Tranchitella
504241a948 Bumping Capsule to v0.0.4 (#183) 2021-01-14 00:00:02 +01:00
59 changed files with 1133 additions and 306 deletions

2
.gitignore vendored
View File

@@ -24,5 +24,7 @@ bin
*~
**/*.kubeconfig
**/*.crt
**/*.key
.DS_Store

View File

@@ -1,6 +1,13 @@
# Build the manager binary
FROM golang:1.13 as builder
ARG GIT_HEAD_COMMIT
ARG GIT_TAG_COMMIT
ARG GIT_LAST_TAG
ARG GIT_MODIFIED
ARG GIT_REPO
ARG BUILD_DATE
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
@@ -15,20 +22,11 @@ COPY version.go version.go
COPY api/ api/
COPY controllers/ controllers/
COPY pkg/ pkg/
COPY .git .git
# Build
RUN git config --get remote.origin.url > /tmp/GIT_REPO && \
git rev-parse --short HEAD > /tmp/GIT_HEAD_COMMIT && \
git describe --abbrev=0 --tags > /tmp/GIT_LAST_TAG && \
git rev-parse --short $(cat /tmp/GIT_LAST_TAG) > /tmp/GIT_TAG_COMMIT && \
git diff $(cat /tmp/GIT_HEAD_COMMIT) $(cat /tmp/GIT_TAG_COMMIT) --quiet > /tmp/GIT_MODIFIED1 || echo '.dev' > /tmp/GIT_MODIFIED1 && \
git diff --quiet > /tmp/GIT_MODIFIED2 || echo '.dirty' > /tmp/GIT_MODIFIED2 && \
cat /tmp/GIT_MODIFIED1 /tmp/GIT_MODIFIED2 | tr -d '\n' > /tmp/GIT_MODIFIED && \
date '+%Y-%m-%dT%H:%M:%S' > /tmp/BUILD_DATE &&\
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build \
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build \
-gcflags "-N -l" \
-ldflags "-X main.GitRepo=$(cat /tmp/GIT_REPO) -X main.GitTag=$(cat /tmp/GIT_LAST_TAG) -X main.GitCommit=$(cat /tmp/GIT_HEAD_COMMIT) -X main.GitDirty=$(cat /tmp/GIT_MODIFIED) -X main.BuildTime=$(cat /tmp/BUILD_DATE)" \
-ldflags "-X main.GitRepo=$GIT_REPO -X main.GitTag=$GIT_LAST_TAG -X main.GitCommit=$GIT_HEAD_COMMIT -X main.GitDirty=$GIT_MODIFIED -X main.BuildTime=$BUILD_DATE" \
-o manager
# Use distroless as minimal base image to package the manager binary

View File

@@ -15,7 +15,7 @@ BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
# Image URL to use all building/pushing image targets
IMG ?= quay.io/clastix/capsule:$(VERSION)
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:trivialVersions=true"
CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false"
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
@@ -24,6 +24,15 @@ else
GOBIN=$(shell go env GOBIN)
endif
# Get information about git current status
GIT_HEAD_COMMIT ?= $$(git rev-parse --short HEAD)
GIT_TAG_COMMIT ?= $$(git rev-parse --short $(VERSION))
GIT_MODIFIED_1 ?= $$(git diff $(GIT_HEAD_COMMIT) $(GIT_TAG_COMMIT) --quiet && echo "" || echo ".dev")
GIT_MODIFIED_2 ?= $$(git diff --quiet && echo "" || echo ".dirty")
GIT_MODIFIED ?= $$(echo "$(GIT_MODIFIED_1)$(GIT_MODIFIED_2)")
GIT_REPO ?= $$(git config --get remote.origin.url)
BUILD_DATE ?= $$(date '+%Y-%m-%dT%H:%M:%S')
all: manager
# Run tests
@@ -75,7 +84,12 @@ generate: controller-gen
# Build the docker image
docker-build: test
docker build . -t ${IMG}
docker build . -t ${IMG} --build-arg GIT_HEAD_COMMIT=$(GIT_HEAD_COMMIT) \
--build-arg GIT_TAG_COMMIT=$(GIT_TAG_COMMIT) \
--build-arg GIT_MODIFIED=$(GIT_MODIFIED) \
--build-arg GIT_REPO=$(GIT_REPO) \
--build-arg GIT_LAST_TAG=$(VERSION) \
--build-arg BUILD_DATE=$(BUILD_DATE)
# Push the docker image
docker-push:
@@ -139,7 +153,16 @@ e2e/%:
kind create cluster --name capsule --image=kindest/node:$*
make docker-build
kind load docker-image --nodes capsule-control-plane --name capsule $(IMG)
kubectl create namespace capsule-system
helm upgrade --install --namespace capsule-system capsule ./charts/capsule --set 'manager.image.pullPolicy=Never' --set 'manager.resources=null'
helm upgrade \
--debug \
--install \
--namespace capsule-system \
--create-namespace capsule \
--set 'manager.image.pullPolicy=Never' \
--set 'manager.resources=null'\
--set "manager.image.tag=$(VERSION)" \
--set 'manager.livenessProbe.failureThreshold=10' \
--set 'manager.readinessProbe.failureThreshold=10' \
./charts/capsule
ginkgo -v -tags e2e ./e2e
kind delete cluster --name capsule

View File

@@ -14,15 +14,17 @@
---
# Kubernetes multi-tenancy made simple
**Capsule** helps to implement a multi-tenancy and policy-based environment in your Kubernetes cluster. It is not intended to be yet another _PaaS_, instead, it has been designed as a micro-services based ecosystem with minimalist approach, leveraging only on upstream Kubernetes.
**Capsule** helps to implement a multi-tenancy and policy-based environment in your Kubernetes cluster. It is not intended to be yet another _PaaS_, instead, it has been designed as a micro-services-based ecosystem with the minimalist approach, leveraging only on upstream Kubernetes.
# What's the problem with the current status?
Kubernetes introduces the _Namespace_ object type to create logical partitions of the cluster as isolated *slices*. However, implementing advanced multi-tenancy scenarios, it becomes soon complicated because of the flat structure of Kubernetes namespaces and the impossibility to share resources among namespaces belonging to the same tenant. To overcome this, cluster admins tend to provision a dedicated cluster for each groups of users, teams, or departments. As an organization grows, the number of clusters to manage and keep aligned becomes an operational nightmare, described as the well know phenomena of the _clusters sprawl_.
# Entering Caspule
Capsule takes a different approach. In a single cluster, the Capsule Controller aggregates multiple namespaces in a lightweight abstraction called _Tenant_. Within each tenant, users are free to create their namespaces and share all the assigned resources while the Capsule Policy Engine keeps the different tenants isolated from each other.
Kubernetes introduces the _Namespace_ object type to create logical partitions of the cluster as isolated *slices*. However, implementing advanced multi-tenancy scenarios, it soon becomes complicated because of the flat structure of Kubernetes namespaces and the impossibility to share resources among namespaces belonging to the same tenant. To overcome this, cluster admins tend to provision a dedicated cluster for each groups of users, teams, or departments. As an organization grows, the number of clusters to manage and keep aligned becomes an operational nightmare, described as the well know phenomena of the _clusters sprawl_.
The _Network and Security Policies_, _Resource Quota_, _Limit Ranges_, _RBAC_, and other policies defined at the tenant level are automatically inherited by all the namespaces in the tenant. And users are free to operate their tenants in authonomy, without the intervention of the cluster administrator. Take a look at following diagram:
# Entering Capsule
Capsule takes a different approach. In a single cluster, the Capsule Controller aggregates multiple namespaces in a lightweight abstraction called _Tenant_, basically a grouping of Kubernetes Namespaces. Within each tenant, users are free to create their namespaces and share all the assigned resources while the Capsule Policy Engine keeps the different tenants isolated from each other.
The _Network and Security Policies_, _Resource Quota_, _Limit Ranges_, _RBAC_, and other policies defined at the tenant level are automatically inherited by all the namespaces in the tenant. Then users are free to operate their tenants in autonomy, without the intervention of the cluster administrator. Take a look at following diagram:
<p align="center" style="padding: 60px 20px">
<img src="assets/capsule-operator.svg" />
@@ -42,7 +44,7 @@ Leverage Kubernetes Admission Controllers to enforce the industry security best
Take control of the resources consumed by users while preventing them to overtake.
## Native Experience
Provide multi-tenancy with a native Kubernetes experience without introducing additional management layers, plugins, or customised binaries.
Provide multi-tenancy with a native Kubernetes experience without introducing additional management layers, plugins, or customized binaries.
## GitOps ready
Capsule is completely declarative and GitOps ready.
@@ -166,7 +168,7 @@ $ make remove
```
# FAQ
- Q. How to pronunce Capsule?
- Q. How to pronounce Capsule?
A. It should be pronounced as `/ˈkæpsjuːl/`.
@@ -180,7 +182,7 @@ $ make remove
- Q. Does it work with my Kuberentes XYZ distribution?
A. We tested Capsule with vanilla Kubernetes 1.16+ on private envirnments and public clouds. We expect it works smootly on any other distribution. Please, let us know if you find it doesn't.
A. We tested Capsule with vanilla Kubernetes 1.16+ on private environments and public clouds. We expect it to work smoothly on any other Kubernetes distribution. Please, let us know if you find it doesn't.
- Q. Do you provide commercial support?

101
assets/logo/capsule.svg Normal file
View File

@@ -0,0 +1,101 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 24.2.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Livello_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 595.28 841.89" style="enable-background:new 0 0 595.28 841.89;" xml:space="preserve">
<style type="text/css">
.st0{fill:#274872;}
.st1{fill:#314A70;}
.st2{fill:#5783AB;}
.st3{fill:#EAECEC;}
</style>
<path class="st0" d="M243.53,178.65c-0.06-4.5-0.37-9.02,0-13.49c0.1-1.22,2.13-3.09,3.45-3.25c6.99-0.88,14.03-1.47,21.07-1.8
c2.43-0.12,3.48-1.05,4.29-3.12c2-5.14,4.08-10.25,6.32-15.29c0.86-1.93,0.56-2.83-1.2-4.09c-4.42-3.15-4.97-8.41-1.6-12.08
c3.7-4.04,8.88-4.09,12.65-0.12c3.5,3.68,3.07,8.88-1.39,12.08c-1.93,1.39-2.08,2.44-1.22,4.44c2.19,5.06,3.96,10.31,6.33,15.27
c0.65,1.37,2.73,2.73,4.28,2.89c7.57,0.77,15.19,1.17,22.79,1.64c2.69,0.16,4.13,1.28,4.21,4.15c0.1,3.95,0.43,7.89,0.66,11.84
c-1.51,0.05-3.03,0.22-4.53,0.13c-12.54-0.76-37.47-2.65-37.47-2.65S254.81,177.52,243.53,178.65z"/>
<g>
<path class="st1" d="M73.32,483.91c-5.2-2.69-9.26-6.43-12.18-11.22c-2.92-4.78-4.38-10.21-4.38-16.28c0-6.07,1.46-11.5,4.38-16.28
c2.92-4.78,6.98-8.52,12.18-11.22c5.2-2.69,11.06-4.04,17.59-4.04c6.45,0,12.09,1.35,16.91,4.04c4.82,2.7,8.33,6.55,10.53,11.56
l-13.78,7.4c-3.19-5.62-7.78-8.43-13.78-8.43c-4.63,0-8.47,1.52-11.5,4.55c-3.04,3.04-4.55,7.17-4.55,12.41
c0,5.24,1.52,9.38,4.55,12.41c3.04,3.04,6.87,4.55,11.5,4.55c6.07,0,10.66-2.81,13.78-8.43l13.78,7.52
c-2.2,4.86-5.71,8.65-10.53,11.39c-4.82,2.73-10.46,4.1-16.91,4.1C84.38,487.95,78.52,486.6,73.32,483.91z"/>
<path class="st1" d="M175.17,431.64c5.08,4.52,7.63,11.33,7.63,20.44v34.96h-16.62v-7.63c-3.34,5.69-9.56,8.54-18.67,8.54
c-4.71,0-8.79-0.8-12.24-2.39c-3.46-1.59-6.09-3.79-7.91-6.6c-1.82-2.81-2.73-6-2.73-9.56c0-5.69,2.14-10.17,6.43-13.44
c4.29-3.26,10.91-4.9,19.87-4.9h14.12c0-3.87-1.18-6.85-3.53-8.94c-2.35-2.09-5.88-3.13-10.59-3.13c-3.26,0-6.47,0.51-9.62,1.54
c-3.15,1.03-5.83,2.41-8.03,4.16l-6.38-12.41c3.34-2.35,7.34-4.17,12.01-5.47c4.67-1.29,9.47-1.94,14.4-1.94
C162.8,424.87,170.08,427.13,175.17,431.64z M160.03,473.89c2.35-1.4,4.02-3.47,5.01-6.21v-6.26h-12.18
c-7.29,0-10.93,2.39-10.93,7.17c0,2.28,0.89,4.08,2.68,5.41c1.78,1.33,4.23,1.99,7.34,1.99
C154.98,475.99,157.67,475.29,160.03,473.89z"/>
<path class="st1" d="M250.6,428.8c4.67,2.62,8.33,6.3,10.99,11.04c2.66,4.75,3.99,10.27,3.99,16.57s-1.33,11.82-3.99,16.57
c-2.66,4.75-6.32,8.43-10.99,11.04s-9.85,3.93-15.54,3.93c-7.82,0-13.97-2.47-18.45-7.4v28.58h-17.76v-83.35h16.97v7.06
c4.4-5.31,10.82-7.97,19.24-7.97C240.76,424.87,245.94,426.18,250.6,428.8z M243.2,468.76c2.92-3.07,4.38-7.19,4.38-12.35
s-1.46-9.28-4.38-12.35c-2.92-3.07-6.66-4.61-11.22-4.61s-8.29,1.54-11.22,4.61c-2.92,3.07-4.38,7.19-4.38,12.35
s1.46,9.28,4.38,12.35c2.92,3.07,6.66,4.61,11.22,4.61S240.28,471.84,243.2,468.76z"/>
<path class="st1" d="M283.11,486.07c-4.86-1.25-8.73-2.83-11.61-4.73l5.92-12.75c2.73,1.75,6.03,3.17,9.91,4.27
c3.87,1.1,7.67,1.65,11.39,1.65c7.51,0,11.27-1.86,11.27-5.58c0-1.75-1.03-3-3.07-3.76c-2.05-0.76-5.2-1.4-9.45-1.94
c-5.01-0.76-9.15-1.63-12.41-2.62c-3.26-0.99-6.09-2.73-8.48-5.24s-3.59-6.07-3.59-10.7c0-3.87,1.12-7.3,3.36-10.3
c2.24-3,5.5-5.33,9.79-7c4.29-1.67,9.35-2.5,15.2-2.5c4.33,0,8.63,0.48,12.92,1.42c4.29,0.95,7.84,2.26,10.65,3.93l-5.92,12.64
c-5.39-3.04-11.27-4.55-17.65-4.55c-3.8,0-6.64,0.53-8.54,1.59c-1.9,1.06-2.85,2.43-2.85,4.1c0,1.9,1.02,3.23,3.07,3.99
c2.05,0.76,5.31,1.48,9.79,2.16c5.01,0.84,9.11,1.73,12.3,2.68c3.19,0.95,5.96,2.68,8.31,5.18c2.35,2.5,3.53,6,3.53,10.48
c0,3.8-1.14,7.17-3.42,10.13c-2.28,2.96-5.6,5.26-9.96,6.89c-4.37,1.63-9.55,2.45-15.54,2.45
C292.94,487.95,287.97,487.32,283.11,486.07z"/>
<path class="st1" d="M399.59,425.78v61.26h-16.85v-7.29c-2.35,2.66-5.16,4.69-8.43,6.09c-3.26,1.4-6.79,2.11-10.59,2.11
c-8.05,0-14.42-2.31-19.13-6.95c-4.71-4.63-7.06-11.5-7.06-20.61v-34.61h17.76v32c0,9.87,4.14,14.8,12.41,14.8
c4.25,0,7.67-1.38,10.25-4.16c2.58-2.77,3.87-6.89,3.87-12.35v-30.29H399.59z"/>
<path class="st1" d="M416.1,402.55h17.76v84.49H416.1V402.55z"/>
<path class="st1" d="M510.04,461.42H463.7c0.83,3.8,2.81,6.79,5.92,9c3.11,2.2,6.98,3.3,11.61,3.3c3.19,0,6.01-0.47,8.48-1.42
c2.47-0.95,4.76-2.45,6.89-4.5l9.45,10.25c-5.77,6.6-14.2,9.91-25.28,9.91c-6.91,0-13.02-1.35-18.33-4.04
c-5.31-2.69-9.41-6.43-12.3-11.22c-2.89-4.78-4.33-10.21-4.33-16.28c0-6,1.42-11.4,4.27-16.23c2.85-4.82,6.76-8.58,11.73-11.27
c4.97-2.69,10.53-4.04,16.68-4.04c6,0,11.42,1.29,16.28,3.87c4.86,2.58,8.67,6.28,11.44,11.1c2.77,4.82,4.16,10.42,4.16,16.79
C510.38,456.86,510.27,458.46,510.04,461.42z M468.48,441.72c-2.73,2.28-4.4,5.39-5.01,9.34h30.17c-0.61-3.87-2.28-6.96-5.01-9.28
c-2.73-2.31-6.07-3.47-10.02-3.47C474.59,438.3,471.21,439.44,468.48,441.72z"/>
</g>
<g>
<g>
<path class="st2" d="M144.97,316.25c2.88-4.14,5.7-8.31,8.68-12.38c0.84-1.14,2.13-1.94,3.22-2.9c8.67,2.77,17.24,5.98,26.06,8.18
c7.28,1.81,7.49,1.33,11.08-5.55c9.52-18.28,18.99-36.58,28.42-54.91c3.55-6.9,7.04-13.85,10.34-20.87c1.87-3.99,1-5.28-3.27-5.1
c-5.07,0.21-10.13,0.68-15.19,1.04c1.72-2.35,3.24-4.87,5.2-7.01c4.47-4.88,9.14-9.57,13.74-14.34c1.84-0.03,3.68,0.02,5.52-0.1
c14.62-1.03,29.24-2.1,43.86-3.16c-0.08,0.84-0.24,1.68-0.24,2.52c0.01,48.41,0.03,96.83,0.05,145.24
c-15.73,0.85-30.48,0.97-47.48-0.65c-16.01-1.04-30.66-3.54-46.6-5.49c-13.64-1.67-26.85-5.2-39.21-11.4
c-4.77-2.4-5.86-5.41-4.24-10.45C145.16,318.1,144.96,317.14,144.97,316.25z"/>
<path class="st3" d="M282.42,346.9c-0.02-48.41-0.04-96.83-0.05-145.24c0-0.84,0.05-1.64,0.04-2.48
c5.63,0.1,11.47-0.06,17.08,0.32c11.35,0.78,22.67,1.83,34.01,2.77c2.69,3.09,5.47,6.1,8.05,9.28c3.38,4.17,6.61,8.47,9.9,12.71
c-6.04-0.52-12.07-1.2-18.13-1.49c-4.12-0.2-4.91,1.24-3.08,4.81c9.87,19.27,19.73,38.54,29.65,57.78
c4.02,7.79,8.22,15.49,12.24,23.29c1.46,2.83,3.6,3.9,6.61,3.17c11.52-2.81,23.03-5.68,34.54-8.52c1.8,3.04,3.52,6.13,5.42,9.1
c0.89,1.39,2.13,2.56,3.21,3.83c0,0.56-0.19,1.22,0.04,1.66c3.28,6.31-0.16,9.95-5.82,12.53c-14.18,6.44-29.11,9.85-44.52,11.41
c-12.89,1.31-25.79,2.51-38.68,3.77c-6.24,0.61-12.47,1.45-18.72,1.79c-4.58,0.24-9.2-0.17-13.81-0.3
c-5.95-0.04-11.9-0.08-17.85-0.12L282.42,346.9z"/>
<path class="st2" d="M413.28,303.3c-11.51,2.84-23.02,5.71-34.54,8.52c-3.01,0.74-5.15-0.34-6.61-3.17
c-4.02-7.79-8.22-15.49-12.24-23.29c-9.92-19.24-19.79-38.51-29.65-57.78c-1.83-3.57-1.04-5.01,3.08-4.81
c6.05,0.29,12.09,0.97,18.13,1.49c1.89,0.4,2.54,0.15,5.06,3.74c17.1,24.41,37.01,47.73,54.85,71.62
C412.17,300.72,412.64,302.07,413.28,303.3z"/>
<path class="st3" d="M155.06,302.38c11.51,2.84,22.26,5.47,33.78,8.28c3.01,0.74,5.15-0.34,6.61-3.17
c4.02-7.79,8.22-15.49,12.24-23.29c9.92-19.24,17.3-37.26,26.37-56.7c1.83-3.57,0.68-4.95-3.44-4.75
c-6.05,0.29-10.08,0.42-16.13,0.94c-2.11,1.25-2.46,1.66-3.84,3.47c-18.01,23.75-35.83,47.64-53.67,71.53
C156.18,299.79,155.7,301.14,155.06,302.38z"/>
<path class="st0" d="M421.92,316.24c0,0.56-0.19,1.22,0.04,1.66c3.28,6.31-0.16,9.95-5.82,12.53
c-14.18,6.44-29.11,9.85-44.52,11.41c-12.89,1.31-25.79,2.51-38.68,3.77c-6.24,0.61-12.94,1.22-18.94,1.29
c-4.59,0.05-8.98,0.32-13.59,0.2c-5.95-0.04-11.9-0.08-17.85-0.12c0,0-0.12-0.08-0.12-0.08c-15.36,0.35-28.73,0.35-46.17-1.19
c-15.98-1.41-31.97-2.99-47.91-4.95c-13.64-1.67-26.85-5.2-39.21-11.4c-4.77-2.4-5.86-5.41-4.24-10.45
c0.26-0.81,0.06-1.77,0.07-2.66c-6.55,2.47-11.33,6.45-12.86,13.75c-1.74,8.28,0.69,15.31,5.77,21.67
c1.43,1.79,2.4,3.22,0.07,5.22c-0.71,0.61-0.81,3.27-0.15,3.89c6.36,6.04,13.89,10.11,22.37,12.36c2.35,0.62,4.12,0.02,4.62-2.85
c0.11-0.64,1.63-1.63,2.27-1.49c8.66,1.96,17.26,4.13,25.91,6.14c1.98,0.46,2.73,1,1.52,3.01c-1.45,2.4-0.41,3.92,2,4.93
c8.64,3.63,17.82,3.98,26.97,4.34c2.18,0.08,4.54-0.9,3.51-3.88c-1.11-3.22,0.45-3.2,2.83-2.99c8.57,0.73,17.14,1.44,25.72,1.95
c3.13,0.19,3.98,1.04,2.41,3.98c-1.6,2.98-0.26,4.76,2.9,4.77c14.82,0.08,29.65,0.17,44.46-0.08c4.59-0.08,5.1-1.29,3.36-5.63
c-0.84-2.1-0.97-2.87,1.76-3.02c9.16-0.52,18.32-1.21,27.45-2.12c2.5-0.25,3.06,0.34,2.55,2.56c-0.53,2.31,0.05,4.05,2.72,4.11
c9.52,0.21,18.91-0.53,27.82-4.34c1.95-0.83,3.09-2.06,1.71-4.23c-1.72-2.71-0.09-3.15,2.17-3.67c8.24-1.87,16.46-3.83,24.64-5.93
c1.82-0.47,3-0.77,3.21,1.6c0.26,2.99,2.1,3.32,4.53,2.61c8.11-2.36,15.55-5.98,21.6-11.99c0.69-0.69,1.03-2.99,0.55-3.39
c-3.18-2.71-1.41-4.64,0.51-6.95C437.87,340.92,439.33,322.67,421.92,316.24z"/>
</g>
</g>
<path class="st3" d="M324.35,192.94c-6.72-0.27-13.4-0.35-20.23-0.52c-7.13-0.17-18.9-0.51-18.9-0.51s-1.27,0.04-2.44,0
c0,0-0.63-0.01-0.63,0.18c-0.01-5.67,0.01-11.83,0-17.5c12.58,0.95,24.65,1.94,37.19,2.72c1.5,0.09,3.29-0.07,4.8-0.12
C324.19,182.43,324.33,187.69,324.35,192.94z"/>
<path class="st2" d="M243.35,193.45c6.72-0.27,10.02-0.35,16.86-0.52c7.13-0.17,18.9-0.51,18.9-0.51s1.27,0.04,2.44,0
c0,0,0.63-0.53,0.63-0.34c0.01-5.67-0.01-11.83,0-17.5c-12.58,0.95-21.28,1.94-33.82,2.72c-1.5,0.09-3.29-0.07-4.8-0.12
C243.51,182.43,243.38,188.21,243.35,193.45z"/>
<path class="st0" d="M327.57,193.15c-1.31-0.1-2.62-0.17-3.93-0.26c-13.33-0.32-26.66-0.63-39.99-0.95v0c-0.03,0-0.06,0-0.1,0
c-0.03,0-0.06,0-0.1,0v0c-13.33,0.32-26.66,0.63-39.99,0.95c-1.31,0.08-2.62,0.15-3.93,0.26c-6.26,0.5-6.88,1.16-6.73,7.17
c0.02,0.7,0.18,1.39,0.27,2.09c1.91-0.03,3.82,0.02,5.72-0.1c14.92-1.02,28.65-2.07,43.57-3.11c14.92,1.04,31.01,2.1,45.93,3.11
c1.9,0.13,3.81,0.07,5.72,0.1c0.09-0.7,0.25-1.39,0.27-2.09C334.45,194.31,333.82,193.65,327.57,193.15z"/>
</svg>

After

Width:  |  Height:  |  Size: 9.4 KiB

View File

@@ -3,7 +3,7 @@ type: application
description: A Helm chart to deploy the Capsule Operator for easily implementing,
managing, and maintaining mutitenancy and access control in Kubernetes.
home: https://github.com/clastix/capsule
icon: https://github.com/clastix/capsule/blob/master/assets/logo/space-capsule3.png
icon: https://github.com/clastix/capsule/raw/master/assets/logo/capsule_small.png
keywords:
- kubernetes
- operator
@@ -21,8 +21,8 @@ sources:
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.0.7
version: 0.0.18
# This is the version number of the application being deployed.
# This version number should be incremented each time you make changes to the application.
appVersion: 0.0.3
appVersion: 0.0.4

View File

@@ -37,7 +37,7 @@ The Capsule Operator Chart can be used to instantly deploy the Capsule Operator
$ helm upgrade capsule clastix/capsule -n capsule-system
5. Uninstall the Chart
$ helm uninstall capsule -n capsule-system
## Customize the installation
@@ -48,7 +48,7 @@ The `--values` option is the preferred method because it allows you to keep your
Specify your overrides file when you install the chart:
$ helm install capsule capsule-helm-chart --values myvalues.yaml -n capsule-system
$ helm install capsule capsule-helm-chart --values myvalues.yaml -n capsule-system
The values in your overrides file `myvalues.yaml` will override their counterparts in the charts values.yaml file. Any values in `values.yaml` that werent overridden will keep their defaults.
@@ -67,6 +67,8 @@ Parameter | Description | Default
`manager.image.repository` | Set the image repository of the controller. | `quay.io/clastix/capsule`
`manager.image.tag` | Overrides the image tag whose default is the chart. `appVersion` | `null`
`manager.image.pullPolicy` | Set the image pull policy. | `IfNotPresent`
`manager.livenessProbe` | Configure the liveness probe using Deployment probe spec | `GET :10080/healthz`
`manager.readinessProbe` | Configure the readiness probe using Deployment probe spec | `GET :10080/readyz`
`manager.resources.requests/cpu` | Set the CPU requests assigned to the controller. | `200m`
`manager.resources.requests/memory` | Set the memory requests assigned to the controller. | `128Mi`
`manager.resources.limits/cpu` | Set the CPU limits assigned to the controller. | `200m`

View File

@@ -39,6 +39,7 @@ spec:
shortNames:
- tnt
singular: tenant
preserveUnknownFields: false
scope: Cluster
subresources:
status: {}
@@ -140,9 +141,6 @@ spec:
type: array
allowedRegex:
type: string
required:
- allowed
- allowedRegex
type: object
limitRanges:
items:

View File

@@ -62,12 +62,26 @@ Create the name of the service account to use
{{- end }}
{{/*
Create the fully-qualified Docker image to use
Create the manager fully-qualified Docker image to use
*/}}
{{- define "capsule.fullyQualifiedDockerImage" -}}
{{- define "capsule.managerFullyQualifiedDockerImage" -}}
{{- printf "%s:%s" .Values.manager.image.repository ( .Values.manager.image.tag | default (printf "v%s" .Chart.AppVersion) ) -}}
{{- end }}
{{/*
Create the proxy fully-qualified Docker image to use
*/}}
{{- define "capsule.proxyFullyQualifiedDockerImage" -}}
{{- printf "%s:%s" .Values.proxy.image.repository .Values.proxy.image.tag -}}
{{- end }}
{{/*
Create the jobs fully-qualified Docker image to use
*/}}
{{- define "capsule.jobsFullyQualifiedDockerImage" -}}
{{- printf "%s:%s" .Values.jobs.image.repository .Values.jobs.image.tag -}}
{{- end }}
{{/*
Create the Capsule Deployment name to use
*/}}

View File

@@ -1,9 +1,6 @@
apiVersion: v1
kind: Secret
metadata:
annotations:
"helm.sh/hook": "pre-install"
"helm.sh/hook-delete-policy": "before-hook-creation"
labels:
{{- include "capsule.labels" . | nindent 4 }}
name: {{ include "capsule.secretCaName" . }}

View File

@@ -1,9 +1,6 @@
apiVersion: v1
kind: Secret
metadata:
annotations:
"helm.sh/hook": "pre-install"
"helm.sh/hook-delete-policy": "before-hook-creation"
labels:
{{- include "capsule.labels" . | nindent 4 }}
name: {{ include "capsule.secretTlsName" . }}

View File

@@ -52,7 +52,7 @@ spec:
{{ if .Values.manager.options.forceTenantPrefix }}- --force-tenant-prefix={{ .Values.manager.options.forceTenantPrefix }}{{ end }}
{{ if .Values.manager.options.capsuleUserGroup }}- --capsule-user-group={{ .Values.manager.options.capsuleUserGroup }}{{ end }}
{{ if .Values.manager.options.protectedNamespaceRegex }}- --protected-namespace-regex={{ .Values.manager.options.protectedNamespaceRegex }}{{ end }}
image: {{ include "capsule.fullyQualifiedDockerImage" . }}
image: {{ include "capsule.managerFullyQualifiedDockerImage" . }}
imagePullPolicy: {{ .Values.manager.image.pullPolicy }}
env:
- name: NAMESPACE
@@ -64,13 +64,9 @@ spec:
containerPort: 9443
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: 10080
{{- toYaml .Values.manager.livenessProbe | nindent 12}}
readinessProbe:
httpGet:
path: /readyz
port: 10080
{{- toYaml .Values.manager.readinessProbe | nindent 12}}
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
@@ -80,8 +76,8 @@ spec:
securityContext:
allowPrivilegeEscalation: false
- name: kube-rbac-proxy
image: {{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}
imagePullPolicy: IfNotPresent
image: {{ include "capsule.proxyFullyQualifiedDockerImage" . }}
imagePullPolicy: {{ .Values.proxy.image.pullPolicy }}
args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/

View File

@@ -28,7 +28,8 @@ spec:
restartPolicy: Never
containers:
- name: post-install-job
image: "bitnami/kubectl:1.18"
image: {{ include "capsule.jobsFullyQualifiedDockerImage" . }}
imagePullPolicy: {{ .Values.jobs.image.pullPolicy }}
command: ["sh", "-c", "{{ $cmd }}"]
env:
- name: NAMESPACE

View File

@@ -29,7 +29,8 @@ spec:
restartPolicy: Never
containers:
- name: pre-delete-job
image: "bitnami/kubectl:1.18"
image: {{ include "capsule.jobsFullyQualifiedDockerImage" . }}
imagePullPolicy: {{ .Values.jobs.image.pullPolicy }}
command: [ "sh", "-c", "{{ $cmd }}"]
env:
- name: NAMESPACE

View File

@@ -173,6 +173,7 @@ webhooks:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- tenants
scope: '*'

View File

@@ -13,6 +13,15 @@ manager:
forceTenantPrefix:
capsuleUserGroup:
protectedNamespaceRegex:
livenessProbe:
httpGet:
path: /healthz
port: 10080
readinessProbe:
httpGet:
path: /readyz
port: 10080
resources:
limits:
cpu: 200m
@@ -32,6 +41,11 @@ proxy:
requests:
cpu: 10m
memory: 64Mi
jobs:
image:
repository: bitnami/kubectl
pullPolicy: IfNotPresent
tag: "1.18"
mutatingWebhooksTimeoutSeconds: 30
validatingWebhooksTimeoutSeconds: 30
imagePullSecrets: []

View File

@@ -41,6 +41,7 @@ spec:
shortNames:
- tnt
singular: tenant
preserveUnknownFields: false
scope: Cluster
subresources:
status: {}

View File

@@ -5,4 +5,4 @@ kind: Kustomization
images:
- name: controller
newName: quay.io/clastix/capsule
newTag: v0.0.3
newTag: v0.0.4

View File

@@ -171,6 +171,7 @@ webhooks:
- v1alpha1
operations:
- CREATE
- UPDATE
resources:
- tenants
- clientConfig:

View File

@@ -98,12 +98,6 @@ func (r TenantReconciler) Reconcile(ctx context.Context, request ctrl.Request) (
return
}
r.Log.Info("Starting processing of Node Selector")
if err = r.ensureNodeSelector(instance); err != nil {
r.Log.Error(err, "Cannot sync Namespaces Node Selector items")
return
}
r.Log.Info("Starting processing of Limit Ranges", "items", len(instance.Spec.LimitRanges))
if err = r.syncLimitRanges(instance); err != nil {
r.Log.Error(err, "Cannot sync LimitRange items")
@@ -458,24 +452,24 @@ func (r *TenantReconciler) syncLimitRanges(tenant *capsulev1alpha1.Tenant) error
return nil
}
func (r *TenantReconciler) syncNamespace(namespace string, tnt *capsulev1alpha1.Tenant) error {
func (r *TenantReconciler) syncNamespaceMetadata(namespace string, tnt *capsulev1alpha1.Tenant) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
ns := &corev1.Namespace{}
if err = r.Client.Get(context.TODO(), types.NamespacedName{Name: namespace}, ns); err != nil {
return
}
a := ns.GetAnnotations()
a := tnt.Spec.NamespacesMetadata.AdditionalAnnotations
if a == nil {
a = make(map[string]string)
}
// resetting Capsule annotations
delete(a, capsulev1alpha1.AvailableIngressClassesAnnotation)
delete(a, capsulev1alpha1.AvailableIngressClassesRegexpAnnotation)
delete(a, capsulev1alpha1.AvailableStorageClassesAnnotation)
delete(a, capsulev1alpha1.AvailableStorageClassesRegexpAnnotation)
delete(a, capsulev1alpha1.AllowedRegistriesAnnotation)
delete(a, capsulev1alpha1.AllowedRegistriesRegexpAnnotation)
if tnt.Spec.NodeSelector != nil {
var selector []string
for k, v := range tnt.Spec.NodeSelector {
selector = append(selector, fmt.Sprintf("%s=%s", k, v))
}
a["scheduler.alpha.kubernetes.io/node-selector"] = strings.Join(selector, ",")
}
if tnt.Spec.IngressClasses != nil {
if len(tnt.Spec.IngressClasses.Exact) > 0 {
a[capsulev1alpha1.AvailableIngressClassesAnnotation] = strings.Join(tnt.Spec.IngressClasses.Exact, ",")
@@ -500,27 +494,15 @@ func (r *TenantReconciler) syncNamespace(namespace string, tnt *capsulev1alpha1.
a[capsulev1alpha1.AllowedRegistriesRegexpAnnotation] = tnt.Spec.ContainerRegistries.Regex
}
}
ns.SetAnnotations(a)
if aa := tnt.Spec.NamespacesMetadata.AdditionalAnnotations; aa != nil {
for k, v := range aa {
a[k] = v
}
}
l := ns.GetLabels()
l := tnt.Spec.NamespacesMetadata.AdditionalLabels
if l == nil {
l = make(map[string]string)
}
capsuleLabel, _ := capsulev1alpha1.GetTypeLabel(&capsulev1alpha1.Tenant{})
l[capsuleLabel] = tnt.GetName()
if al := tnt.Spec.NamespacesMetadata.AdditionalLabels; al != nil {
for k, v := range al {
l[k] = v
}
}
ns.SetLabels(l)
ns.SetAnnotations(a)
return r.Client.Update(context.TODO(), ns, &client.UpdateOptions{})
})
@@ -533,7 +515,7 @@ func (r *TenantReconciler) syncNamespaces(tenant *capsulev1alpha1.Tenant) (err e
for _, item := range tenant.Status.Namespaces {
namespace := item
group.Go(func() error {
return r.syncNamespace(namespace, tenant)
return r.syncNamespaceMetadata(namespace, tenant)
})
}
@@ -647,35 +629,6 @@ func (r *TenantReconciler) ownerRoleBinding(tenant *capsulev1alpha1.Tenant) erro
return nil
}
func (r *TenantReconciler) ensureNodeSelector(tenant *capsulev1alpha1.Tenant) (err error) {
for _, namespace := range tenant.Status.Namespaces {
ns := &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
},
}
var res controllerutil.OperationResult
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, ns, func() error {
if ns.Annotations == nil {
ns.Annotations = make(map[string]string)
}
var selector []string
for k, v := range tenant.Spec.NodeSelector {
selector = append(selector, fmt.Sprintf("%s=%s", k, v))
}
ns.Annotations["scheduler.alpha.kubernetes.io/node-selector"] = strings.Join(selector, ",")
return nil
})
r.Log.Info("Namespace Node sync result: "+string(res), "name", ns.Name)
if err != nil {
return err
}
}
return
}
func (r *TenantReconciler) ensureNamespaceCount(tenant *capsulev1alpha1.Tenant) error {
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
tenant.Status.Size = uint(len(tenant.Status.Namespaces))

View File

@@ -1,5 +1,5 @@
# Capsule Documentation
**Capsule** helps to implement a multi-tenancy and policy-based environment in your Kubernetes cluster. It has been designed as a micro-services based ecosystem with minimalist approach, leveraging only on upstream Kubernetes.
**Capsule** helps to implement a multi-tenancy and policy-based environment in your Kubernetes cluster. It has been designed as a micro-services based ecosystem with the minimalist approach, leveraging only on upstream Kubernetes.
Currently, the Capsule ecosystem comprises the following:
@@ -39,4 +39,4 @@ docs
├── resources-quota-limits.md
├── storage-classes.md
└── taint-namespaces.md
```
```

View File

@@ -77,7 +77,7 @@ Users authenticated through an _OIDC token_ must have
in their token.
The [hack/create-user.sh](hack/create-user.sh) can help you set up a dummy `kubeconfig` for the `alice` user acting as owner of a tenant called `oil`
The [hack/create-user.sh](../../hack/create-user.sh) can help you set up a dummy `kubeconfig` for the `alice` user acting as owner of a tenant called `oil`
```bash
./hack/create-user.sh alice oil

View File

@@ -676,6 +676,8 @@ Option | Description | Default
`--zap-devel` | The flag to get the stack traces for deep debugging. | `null`
`--capsule-user-group` | Override the Capsule group to which all tenant owners must belong. | `capsule.clastix.io`
`--protected-namespace-regex` | Disallows creation of namespaces matching the passed regexp. | `null`
`--allow-ingress-hostname-collision` | By default, Capsule allows Ingress hostname collision: set to `false` to enforce this policy. | `true`
`--allow-tenant-ingress-hostnames-collision` | Toggling this, Capsule will not check if a hostname collision is in place, allowing the creation of two or more Tenant resources although sharing the same allowed hostname(s). | `false`
## Created Resources
Once installed, the Capsule operator creates the following resources in your cluster:

View File

@@ -10,9 +10,9 @@ alice@caas# kubectl create ns oil-production
> it is likely that many different tenants would like to call their namespaces
> as `production`, `test`, or `demo`, etc.
>
> The enforcement of this naming convention, however, is optional and can be controlled by the cluster administrator with the `--force-tenant-prefix` option as argument of the Capsule controller.
> The enforcement of this naming convention is optional and can be controlled by the cluster administrator with the `--force-tenant-prefix` option as an argument of the Capsule controller.
When Alice creates the namespace, the Capsule controller, listening for creation and deletion events assigns to Alice the following roles:
When Alice creates the namespace, the Capsule controller listening for creation and deletion events assigns to Alice the following roles:
```yaml
---
@@ -69,7 +69,7 @@ alice@caas# kubectl create ns oil-development
alice@caas# kubectl create ns oil-test
```
While Alice creates namespace resources, the Capsule controller updates the status of the tenant so Bill, the cluster admin, can check its status:
While Alice creates namespace resources the Capsule controller updates the status of the tenant so Bill, the cluster admin, can check its status:
```
bill@caas# kubectl describe tenant oil
@@ -90,10 +90,9 @@ Once the namespace quota assigned to the tenant has been reached, Alice cannot c
```
alice@caas# kubectl create ns oil-training
Error from server (Cannot exceed Namespace quota: please, reach out the system administrators): admission webhook "quota.namespace.capsule.clastix.io" denied the request.
Error from server (Cannot exceed Namespace quota: please, reach out to the system administrators): admission webhook "quota.namespace.capsule.clastix.io" denied the request.
```
The enforcement on the maximum number of Namespace resources per Tenant is in charge of the Capsule controller via its Dynamic Admission Webhook capability.
The enforcement on the maximum number of Namespace resources per Tenant is the responsibility of the Capsule controller via its Dynamic Admission Webhook capability.
# Whats next
See how Alice, the tenant owner, can assign different user roles in the tenant. [Assign permissions](./permissions.md).
See how Alice, the tenant owner, can assign different user roles in the tenant. [Assign permissions](./permissions.md).

View File

@@ -1,5 +1,5 @@
# Create Custom Resources
Capsule operator the admin permissions to the tenant's users but only limited to their namespaces. To achieve that, it assign the ClusterRole [admin](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) to the tenant owner. This ClusterRole does not permit the installation of custom resources in the namespaces.
Capsule operator can grant the admin permissions to the tenant's users but only limited to their namespaces. To achieve that, it assigns the ClusterRole [admin](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) to the tenant owner. This ClusterRole does not permit the installation of custom resources in the namespaces.
In order to leave the tenant owner to create Custom Resources in their namespaces, the cluster admin defines a proper Cluster Role. For example:
@@ -85,7 +85,7 @@ roleRef:
With the above example, Capsule is leaving the tenant owner to create namespaced custom resources.
> Nota bene: a tenant owner having the admin scope on its namespaces only, does not have the permission to create Custom Resources Definitions (CRDs) because this requires a cluster admin permission level. Only Bill, the cluster admin, can create CRDs. This is a known limitation of any multi-tenancy environment based on a single Kubernetes cluster.
> Take Note: a tenant owner having the admin scope on its namespaces only, does not have the permission to create Custom Resources Definitions (CRDs) because this requires a cluster admin permission level. Only Bill, the cluster admin, can create CRDs. This is a known limitation of any multi-tenancy environment based on a single Kubernetes cluster.
# Whats next
See how Bill, the cluster admin, can set taints on the Alice's namespaces. [Taint namespaces](./taint-namespaces.md).
See how Bill, the cluster admin, can set taints on the Alice's namespaces. [Taint namespaces](./taint-namespaces.md).

View File

@@ -20,9 +20,9 @@ spec:
allowedRegex: ''
```
> In case of naked and official images hosted on Docker Hub, Capsule is going
> In case of `non FQDI` (non fully qualified Docker image) and official images hosted on Docker Hub, Capsule is going
> to retrieve the registry even if it's not explicit: a `busybox:latest` Pod
> running on a Tenant allowing `docker.io` will not blocked, even if the image
> running on a Tenant allowing `docker.io` will not be blocked, even if the image
> field is not explicit as `docker.io/busybox:latest`.
@@ -44,10 +44,10 @@ spec:
A Pod running `internal.registry.foo.tld` as registry will be allowed, as well as `internal.registry.bar.tld`, since both match the regular expression.
> You can also set a catch-all as .* to allow every kind of registry,
> You can also set a catch-all regex entry as .* to allow every kind of registry,
> that would be the same result of unsetting `containerRegistries` at all
As per Ingress and Storage classes, also the allowed registries can be inspected from the Tenant's namespace
As per Ingress and Storage classes the allowed registries can be inspected from the Tenant's namespace
```
alice@caas# kubectl describe ns oil-production
@@ -60,4 +60,3 @@ Annotations: capsule.clastix.io/allowed-registries: docker.io
# Whats next
See how Bill, the cluster admin, can assign Pod Security Policies to Alice's tenant. [Assign Pod Security Policies](./pod-security-policies.md).

View File

@@ -66,10 +66,10 @@ spec:
path: /
```
Any tentative of Alice to use a not valid Ingress Class, e.g. `default`, will fail.
Any attempt by Alice to use an invalid Ingress Class, e.g. `default`, will fail.
> The effect of this policy is that the services created in the tenant will be published
> only on the Ingress Controller designated by Bill to accept one of the allowed Ingress Classes.
# Whats next
See how Bill, the cluster admin, can assign a set of dedicated ingress hostnames to Alice's tenant. [Assign Ingress Hostnames](./ingress-hostnames.md).
See how Bill, the cluster admin, can assign a set of dedicated ingress hostnames to Alice's tenant. [Assign Ingress Hostnames](./ingress-hostnames.md).

View File

@@ -59,7 +59,7 @@ spec:
```
Any tentative of Alice to use a not valid hostname, e.g. `web.gas.acmecorp.org`, will fail.
Any attempt by Alice to use an invalid hostname, e.g. `web.gas.acmecorp.org`, will fail.
# Whats next
See how Bill, the cluster admin, can assign a Storage Class to Alice's tenant. [Assign Storage Classes](./storage-classes.md).
See how Bill, the cluster admin, can assign a Storage Class to Alice's tenant. [Assign Storage Classes](./storage-classes.md).

View File

@@ -1,5 +1,5 @@
# Assign multiple tenants to an owner
In some scenarios, it's likely that a single team is responsible for multiple lines of business. For example, in our sample organization Acme Corp., Alice is responsible for both the Oil and Gas lines of business. Ans it's more probable that Alice requires two different tenants, for example `oil` and `gas` to keep things isolated.
In some scenarios, it's likely that a single team is responsible for multiple lines of business. For example, in our sample organization Acme Corp., Alice is responsible for both the Oil and Gas lines of business. It's more likely that Alice requires two different tenants, for example `oil` and `gas` to keep things isolated.
By design, the Capsule operator does not permit hierarchy of tenants, since all tenants are at the same levels. However, we can assign the ownership of multiple tenants to the same user or group of users.
@@ -107,4 +107,4 @@ kubectl create -f gas-production-ns.yaml
>`Unable to assign namespace to tenant. Please use capsule.clastix.io/tenant label when creating a namespace.`
# Whats next
This end our tour in Capsule use cases. As we improve Capsule, more use cases about multi-tenancy, policy admission control, and cluster governance will be covered in the future. Stay tuned!
This ends our tour of Capsule use cases. As we improve Capsule, more use cases about multi-tenancy, policy admission control, and cluster governance will be covered in the future. Stay tuned!

View File

@@ -96,7 +96,7 @@ However, the Capsule controller prevents Alice to delete the tenant network poli
```
alice@caas# kubectl -n oil-production delete networkpolicy capsule-oil-0
Error from server (Capsule Network Policies cannot be deleted: please, reach out the system administrators): admission webhook "validating.network-policy.capsule.clastix.io" denied the request: Capsule Network Policies cannot be deleted: please, reach out the system administrators
Error from server (Capsule Network Policies cannot be deleted: please, reach out to the system administrators): admission webhook "validating.network-policy.capsule.clastix.io" denied the request: Capsule Network Policies cannot be deleted: please, reach out to the system administrators
```
# Whats next

View File

@@ -1,4 +1,4 @@
# Assign a nodes pool
# Assign a node pool
Bill, the cluster admin, can dedicate a pool of worker nodes to the `oil` tenant, to isolate the tenant applications from other noisy neighbors.
These nodes are labeled by Bill as `pool=oil`
@@ -33,7 +33,7 @@ The Capsule controller makes sure that any namespace created in the tenant has t
The effect is that all the pods deployed by Alice are placed only on the designated pool of nodes.
Any tentative of Alice to change the selector on the pods will result in the following error from
Any attempt by Alice to change the selector on the pods will result in the following error from
the `PodNodeSelector` Admission Controller plugin:
```

View File

@@ -1,5 +1,5 @@
# Onboard a new tenant
Bill receives a new request from the Acme Corp.'s CTO asking a new tenant for Alice's organization has to be on board. Bill assigns the Alice's identity `alice` in the Acme Corp. identity management system. And because, Alice is a tenant owner, Bill needs to assign `alice` the Capsule group defined by `--capsule-user-group` option, which defaults to `capsule.clastix.io`.
Bill receives a new request from Acme Corp.'s CTO asking for a new tenant to be onboarded for Alice's organization. Bill then assigns Alice the identity `alice` in the Acme Corp. identity management system. Since Alice is a tenant owner, Bill needs to assign `alice` the Capsule group defined by the `--capsule-user-group` option, which defaults to `capsule.clastix.io`.
To keep the things simple, we assume that Bill just creates a client certificate for authentication using X.509 Certificate Signing Request, so Alice's certificate has `"/CN=alice/O=capsule.clastix.io"`.
@@ -17,7 +17,7 @@ spec:
namespaceQuota: 3
```
Bill checks the new tenant is created and operational:
Bill checks if the new tenant is created and operational:
```
bill@caas# kubectl get tenant oil
@@ -31,7 +31,7 @@ oil 9 0 alice User
Once the new tenant `oil` is in place, Bill sends the login credentials to Alice.
Alice can log in to the CaaS platform and checks if she can create a namespace
Alice can log in to the CaaS platform and check if she can create a namespace
```
alice@caas# kubectl auth can-i create namespaces
@@ -72,7 +72,7 @@ no
```
## Assign a group of users as tenant owner
In the example above, Bill assigned the ownership of `oil` tenant to `alice` user. However, is more likely that multiple users in the Alice's oraganization, need to admin the `oil` tenant. In such cases, Bill can assign the ownership of the `oil` tenant to a group of users instead of a single one.
In the example above, Bill assigned the ownership of the `oil` tenant to the `alice` user. However, it is more likely that multiple users in Alice's organization need to administer the `oil` tenant. In such cases, Bill can assign the ownership of the `oil` tenant to a group of users instead of a single one.
Bill creates a new group account `oil` in the Acme Corp. identity management system and then he assigns Alice's identity `alice` to the `oil` group.

View File

@@ -1,31 +1,31 @@
# Use cases for Capsule
Using Capsule, a cluster admin can implement complex multi-tenants scenarios for both public and private deployments. Here a list of common scenarios addressed by Capsule.
Using Capsule, a cluster admin can implement complex multi-tenant scenarios for both public and private deployments. Here a list of common scenarios addressed by Capsule.
# Container as a Service (CaaS)
***Acme Corp***, our sample organization, built a Container as a Service platform (CaaS), based on Kubernetes, to serve multiple lines of business. Each line of business, has its own team of engineers that are responsible for development, deployment, and operating their digital products.
***Acme Corp***, our sample organization, built a Container as a Service platform (CaaS), based on Kubernetes, to serve multiple lines of business. Each line of business has its own team of engineers that are responsible for development, deployment, and operation of their digital products.
To simplify the usage of Capsule in this scenario, we'll work with the following actors:
* ***Bill***:
he is the cluster administrator from the operations department of Acme Corp. and he is in charge of admin and maintains the CaaS platform.
he is the cluster administrator from the operations department of Acme Corp. and he is in charge of administration and maintains the CaaS platform.
* ***Alice***:
she works as IT Project Leader at Oil & Gas Business Units, two new lines of business at Acme Corp. Alice is responsible for all the strategic IT projects and she is responsible also for a team made of different background (developers, administrators, SRE engineers, etc.) and organized in separate departments.
she works as the IT Project Leader in the Oil & Gas Business Units. These are two new lines of business at Acme Corp. Alice is responsible for all the strategic IT projects in the two LOB's. She also is responsible for a team made of different job responsibilities (developers, administrators, SRE engineers, etc.) working in separate departments.
* ***Joe***:
he works at Acme Corp, as a lead developer of a distributed team in Alice's organization.
Joe is responsible for developing a mission-critical project in the Oil market.
he works at Acme Corp, as a lead developer of a distributed team in Alice's organization. Joe is responsible for developing a mission-critical project in the Oil market.
* ***Bob***:
he is the head of Engineering for the Water Business Unit, the main and historichal line of business at Acme Corp. He is responsible for development, deployment, and operating multiple digital products in production for a large set of customers.
he is the head of Engineering for the Water Business Unit, the main and historical line of business at Acme Corp. He is responsible for development, deployment, and operating multiple digital products in production for a large set of customers.
Bill, at Acme Corp. can use Capsule to address any of the following scenarios:
* [Onboard a new tenant](./onboarding.md)
* [Create namespaces](./create-namespaces.md)
* [Assign permissions](./permissions.md)
* [Enforce resources quota and limits](./resources-quota-limits.md)
* [Assign a nodes pool](./nodes-pool.md)
* [Onboard a new Tenant](./onboarding.md)
* [Create Namespaces](./create-namespaces.md)
* [Assign Permissions](./permissions.md)
* [Enforce Resources Quotas and Limits](./resources-quota-limits.md)
* [Assign specific Node Pools](./nodes-pool.md)
* [Assign Ingress Classes](./ingress-classes.md)
* [Assign Ingress Hostnames](./ingress-hostnames.md)
* [Assign Storage Classes](./storage-classes.md)
@@ -33,11 +33,12 @@ Bill, at Acme Corp. can use Capsule to address any of the following scenarios:
* [Assign Trusted Images Registries](./images-registries.md)
* [Assign Pod Security Policies](./pod-security-policies.md)
* [Create Custom Resources](./custom-resources.md)
* [Taint namespaces](./taint-namespaces.md)
* [Assign multiple tenants to an owner](./multiple-tenants.md)
* [Taint Namespaces](./taint-namespaces.md)
* [Assign multiple Tenants to an owner](./multiple-tenants.md)
> NB: as we improve Capsule, more use cases about multi-tenancy and cluster governance will be covered.
# Whats next
See how the cluster admin puts a new tenant onboard. [Onboard a new tenant](./onboarding.md).
Now let's see how the cluster admin onboards a new tenant. [Onboarding a new tenant](./onboarding.md).

View File

@@ -3,9 +3,9 @@ Alice acts as the tenant admin. Other users can operate inside the tenant with d
One of the key design principles of the Capsule is the self-provisioning management from the tenant owner's perspective. Alice, the tenant owner, does not need to interact with Bill, the cluster admin, to complete her day-by-day duties. On the other side, Bill has not to deal with multiple requests coming from multiple tenant owners that probably will overwhelm him.
Capsule leaves Alice the freedom to create RBAC roles at the namespace level, or using the pre-defined cluster roles already available in Kubernetes, and assign them to other users in the tenant. Being roles and rolebindings, limited to a namespace scope, Alice can assign the roles to the other users accessing the same tenant only after the namespace is created. This gives Alice the power to admin the tenant without the inteervention of the cluster admin.
Capsule leaves Alice the freedom to create RBAC roles at the namespace level, or using the pre-defined cluster roles already available in Kubernetes, and assign them to other users in the tenant. Since roles and rolebindings are limited to a namespace scope, Alice can assign the roles to the other users accessing the same tenant only after the namespace is created. This gives Alice the power to administer the tenant without the intervention of the cluster admin.
From the cluster admin perspective, the only required action to Bill is to provision the other identities, eg. `joe` in the Identity Management system of Acme Corp. But this task can be done once, when onboarding the tenant and the users accessing the tenant can be part of the tenant business profile.
From the cluster admin perspective, the only required action for Bill is to provision the other identities, e.g. `joe`, in the Identity Management system of Acme Corp. This task can be done once, when onboarding the tenant; the users accessing the tenant can be part of the tenant business profile.
Alice can create Roles and RoleBindings only in the namespaces she owns
@@ -40,4 +40,4 @@ subjects:
Joe now can operate on the namespace `oil-development` as admin but he has no access to the other namespaces `oil-production`, and `oil-test` that are part of the same tenant.
# Whats next
See how Bill, the cluster admin, set resources quota and limits for Alice's tenant. [Enforce resources quota and limits](./resources-quota-limits.md).
See how Bill, the cluster admin, set resources quota and limits for Alice's tenant. [Enforce resources quota and limits](./resources-quota-limits.md).

View File

@@ -1,5 +1,5 @@
# Enforce resources quota and limits
With help of Capsule, Bill, the cluster admin, can set and enforce resources quota and limits for the Alice's tenant
With the help of Capsule, Bill, the cluster admin, can set and enforce resource quotas and limits for Alice's tenant
```yaml
apiVersion: capsule.clastix.io/v1alpha1
@@ -199,7 +199,7 @@ PersistentVolumeClaim storage 1Gi 10Gi - - -
Since limit ranges are specific to single resources, there is no aggregate to count.
Having access to resource quota and limits, however, Alice is not able to change or delete it according to the assigned RBAC profile.
Having access to resource quotas and limits, Alice still doesn't have permissions to change or delete the resources according to the assigned RBAC profile.
```
alice@caas# kubectl -n oil-production auth can-i patch resourcequota

View File

@@ -17,7 +17,7 @@ spec:
...
```
It is also possible to use regular expression for assigning Storage Classes:
It is also possible to use a regular expression for assigning Storage Classes:
```yaml
apiVersion: capsule.clastix.io/v1alpha1
@@ -63,7 +63,7 @@ spec:
storage: 12Gi
```
Any tentative of Alice to use a not valid Storage Class, e.g. `default`, will fail::
Any attempt by Alice to use an invalid Storage Class, e.g. `default`, will fail:
```
Error from server: error when creating persistent volume claim pvc:
admission webhook "pvc.capsule.clastix.io" denied the request:
@@ -71,4 +71,4 @@ Storage Class default is forbidden for the current Tenant
```
# Whats next
See how Bill, the cluster admin, can assign Network Policies to Alice's tenant. [Assign Network Policies](./network-policies.md).
See how Bill, the cluster admin, can assign Network Policies to Alice's tenant. [Assign Network Policies](./network-policies.md).

View File

@@ -0,0 +1,127 @@
//+build e2e
/*
Copyright 2020 Clastix Labs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/clastix/capsule/api/v1alpha1"
)
// This e2e suite verifies Capsule's default behaviour for Ingress hostname
// collisions: with no hostname restriction configured on the Tenant, several
// Ingress resources declaring the very same hostname can be created inside
// a Tenant namespace.
var _ = Describe("when handling Ingress hostnames collision", func() {
// Tenant fixture owned by the "ingress-allowed" User; no IngressHostnames
// restriction is declared, so duplicated hostnames must be accepted.
tnt := &v1alpha1.Tenant{
ObjectMeta: metav1.ObjectMeta{
Name: "ingress-hostnames-allowed-collision",
},
Spec: v1alpha1.TenantSpec{
Owner: v1alpha1.OwnerSpec{
Name: "ingress-allowed",
Kind: "User",
},
},
}
// scaffold a basic networking.k8s.io Ingress with name and host
networkingIngress := func(name, hostname string) *networkingv1.Ingress {
return &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: hostname,
},
},
},
}
}
// scaffold a basic extensions Ingress with name and host
extensionsIngress := func(name, hostname string) *extensionsv1beta1.Ingress {
return &extensionsv1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: extensionsv1beta1.IngressSpec{
Rules: []extensionsv1beta1.IngressRule{
{
Host: hostname,
},
},
},
}
}
// (Re)create the Tenant before each spec; ResourceVersion is cleared so the
// same fixture object can be submitted again after a previous deletion.
JustBeforeEach(func() {
EventuallyCreation(func() error {
tnt.ResourceVersion = ""
return k8sClient.Create(context.TODO(), tnt)
}).Should(Succeed())
})
// Remove the Tenant after each spec so suites stay independent.
JustAfterEach(func() {
Expect(k8sClient.Delete(context.TODO(), tnt)).Should(Succeed())
})
It("should allow creating several Ingress with same hostname", func() {
// The served Ingress API groups depend on the cluster version, so the
// assertions below are gated on the detected server semver.
maj, min, _ := GetKubernetesSemVer()
ns := NewNamespace("allowed-collision")
cs := ownerClient(tnt)
NamespaceCreation(ns, tnt, defaultTimeoutInterval).Should(Succeed())
TenantNamespaceList(tnt, podRecreationTimeoutInterval).Should(ContainElement(ns.GetName()))
// networking.k8s.io/v1 is only exercised on 1.19+ clusters (min > 18).
if maj == 1 && min > 18 {
By("testing networking.k8s.io", func() {
Eventually(func() (err error) {
obj := networkingIngress("networking-1", "kubernetes.io")
_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
return
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
// A second Ingress with the very same hostname must be accepted too.
Eventually(func() (err error) {
obj := networkingIngress("networking-2", "kubernetes.io")
_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
return
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
})
}
// extensions/v1beta1 is only exercised below 1.22 (min < 22), presumably
// because the API group is no longer served there — confirm against the
// cluster's served API versions.
if maj == 1 && min < 22 {
By("testing extensions", func() {
Eventually(func() (err error) {
obj := extensionsIngress("extensions-1", "kubernetes.io")
_, err = cs.ExtensionsV1beta1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
return
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
// Again, the duplicated hostname must not be rejected.
Eventually(func() (err error) {
obj := extensionsIngress("extensions-2", "kubernetes.io")
_, err = cs.ExtensionsV1beta1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
return
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
})
}
})
})

View File

@@ -0,0 +1,128 @@
//+build e2e
/*
Copyright 2020 Clastix Labs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/clastix/capsule/api/v1alpha1"
)
// E2E scenario: with --allow-ingress-hostname-collision=false, the very same
// hostname must not be usable by two Ingress resources, even inside the same
// Tenant Namespace. The second creation attempt for each API group is expected
// to be rejected by the validating webhook.
var _ = Describe("when handling Ingress hostnames collision", func() {
// Tenant owning the Namespace in which the colliding Ingresses are created.
tnt := &v1alpha1.Tenant{
ObjectMeta: metav1.ObjectMeta{
Name: "ingress-hostnames-denied-collision",
},
Spec: v1alpha1.TenantSpec{
Owner: v1alpha1.OwnerSpec{
Name: "ingress-denied",
Kind: "User",
},
},
}
// scaffold a basic networking.k8s.io Ingress with name and host
networkingIngress := func(name, hostname string) *networkingv1.Ingress {
return &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: hostname,
},
},
},
}
}
// scaffold a basic extensions Ingress with name and host
extensionsIngress := func(name, hostname string) *extensionsv1beta1.Ingress {
return &extensionsv1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: extensionsv1beta1.IngressSpec{
Rules: []extensionsv1beta1.IngressRule{
{
Host: hostname,
},
},
},
}
}
// Recreate the Tenant and restart the manager with collision checking turned
// ON (the flag defaults to true; it is set explicitly here for clarity).
JustBeforeEach(func() {
EventuallyCreation(func() error {
return k8sClient.Create(context.TODO(), tnt)
}).Should(Succeed())
ModifyCapsuleManagerPodArgs(append(defaulManagerPodArgs, []string{"--allow-ingress-hostname-collision=false"}...))
})
// Clean up the Tenant and restore the default manager arguments.
JustAfterEach(func() {
Expect(k8sClient.Delete(context.TODO(), tnt)).Should(Succeed())
ModifyCapsuleManagerPodArgs(defaulManagerPodArgs)
})
It("should not allow creating several Ingress with same hostname", func() {
maj, min, _ := GetKubernetesSemVer()
ns := NewNamespace("allowed-collision")
cs := ownerClient(tnt)
NamespaceCreation(ns, tnt, defaultTimeoutInterval).Should(Succeed())
TenantNamespaceList(tnt, podRecreationTimeoutInterval).Should(ContainElement(ns.GetName()))
// networking.k8s.io/v1 Ingress is only served on Kubernetes >= 1.19.
if maj == 1 && min > 18 {
By("testing networking.k8s.io", func() {
// The first Ingress claiming the hostname must be admitted…
Eventually(func() (err error) {
obj := networkingIngress("networking-1", "kubernetes.io")
_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
return
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
// …while a second one reusing it must be denied.
Eventually(func() (err error) {
obj := networkingIngress("networking-2", "kubernetes.io")
_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
return
}, defaultTimeoutInterval, defaultPollInterval).ShouldNot(Succeed())
})
}
// extensions/v1beta1 Ingress was removed in Kubernetes 1.22.
if maj == 1 && min < 22 {
By("testing extensions", func() {
Eventually(func() (err error) {
obj := extensionsIngress("extensions-1", "cncf.io")
_, err = cs.ExtensionsV1beta1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
return
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
Eventually(func() (err error) {
obj := extensionsIngress("extensions-2", "cncf.io")
_, err = cs.ExtensionsV1beta1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
return
}, defaultTimeoutInterval, defaultPollInterval).ShouldNot(Succeed())
})
}
})
})

View File

@@ -200,7 +200,7 @@ var _ = Describe("when Tenant handles Ingress hostnames", func() {
By("testing networking.k8s.io", func() {
for _, h := range []string{"foo", "bar", "bizz"} {
Eventually(func() (err error) {
obj := networkingIngress(fmt.Sprintf("allowed-networking-%s", h), h)
obj := networkingIngress(fmt.Sprintf("allowed-networking-%s", h), fmt.Sprintf("%s.clastix.io", h))
_, err = cs.NetworkingV1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
return
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
@@ -212,7 +212,7 @@ var _ = Describe("when Tenant handles Ingress hostnames", func() {
By("testing extensions", func() {
for _, h := range []string{"foo", "bar", "bizz"} {
Eventually(func() (err error) {
obj := extensionsIngress(fmt.Sprintf("allowed-extensions-%s", h), h)
obj := extensionsIngress(fmt.Sprintf("allowed-extensions-%s", h), fmt.Sprintf("%s.clastix.io", h))
_, err = cs.ExtensionsV1beta1().Ingresses(ns.GetName()).Create(context.TODO(), obj, metav1.CreateOptions{})
return
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())

View File

@@ -0,0 +1,82 @@
//+build e2e
/*
Copyright 2020 Clastix Labs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/clastix/capsule/api/v1alpha1"
)
// E2E scenario: with --allow-tenant-ingress-hostnames-collision=true, a second
// Tenant may declare an allowed Ingress hostname that is already declared by an
// existing Tenant, and its creation must succeed.
var _ = Describe("when a second Tenant contains an already declared allowed Ingress hostname", func() {
// The first Tenant, pre-claiming a set of exact allowed hostnames.
tnt := &v1alpha1.Tenant{
ObjectMeta: metav1.ObjectMeta{
Name: "allowed-collision-ingress-hostnames",
},
Spec: v1alpha1.TenantSpec{
Owner: v1alpha1.OwnerSpec{
Name: "first-user",
Kind: "User",
},
IngressHostnames: &v1alpha1.AllowedListSpec{
Exact: []string{"capsule.clastix.io", "docs.capsule.k8s", "42.clatix.io"},
},
},
}
// Recreate the first Tenant and restart the manager with Tenant-level
// hostname collision checking disabled.
JustBeforeEach(func() {
EventuallyCreation(func() error {
// ResourceVersion must be blanked so the object can be re-created
// across repeated setups.
tnt.ResourceVersion = ""
return k8sClient.Create(context.TODO(), tnt)
}).Should(Succeed())
ModifyCapsuleManagerPodArgs(append(defaulManagerPodArgs, []string{"--allow-tenant-ingress-hostnames-collision=true"}...))
})
// Clean up and restore the default manager arguments.
JustAfterEach(func() {
Expect(k8sClient.Delete(context.TODO(), tnt)).Should(Succeed())
ModifyCapsuleManagerPodArgs(defaulManagerPodArgs)
})
It("should not block creation if contains collided Ingress hostnames", func() {
// For each hostname owned by the first Tenant, create (and then remove) a
// second Tenant re-declaring that single hostname: each must be admitted.
for i, h := range tnt.Spec.IngressHostnames.Exact {
tnt2 := &v1alpha1.Tenant{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-%d", tnt.GetName(), i),
},
Spec: v1alpha1.TenantSpec{
Owner: v1alpha1.OwnerSpec{
Name: "second-user",
Kind: "User",
},
IngressHostnames: &v1alpha1.AllowedListSpec{
Exact: []string{h},
},
},
}
EventuallyCreation(func() error {
return k8sClient.Create(context.TODO(), tnt2)
}).Should(Succeed())
// Best-effort cleanup: a leftover Tenant would not affect the next
// iteration since each uses a distinct hostname.
_ = k8sClient.Delete(context.TODO(), tnt2)
}
})
})

View File

@@ -20,6 +20,7 @@ package e2e
import (
"context"
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -31,7 +32,7 @@ import (
var _ = Describe("when a second Tenant contains an already declared allowed Ingress hostname", func() {
tnt := &v1alpha1.Tenant{
ObjectMeta: metav1.ObjectMeta{
Name: "first-ingress-hostnames",
Name: "no-collision-ingress-hostnames",
},
Spec: v1alpha1.TenantSpec{
Owner: v1alpha1.OwnerSpec{
@@ -55,10 +56,10 @@ var _ = Describe("when a second Tenant contains an already declared allowed Ingr
})
It("should block creation if contains collided Ingress hostnames", func() {
for _, h := range tnt.Spec.IngressHostnames.Exact {
for i, h := range tnt.Spec.IngressHostnames.Exact {
tnt2 := &v1alpha1.Tenant{
ObjectMeta: metav1.ObjectMeta{
Name: "second-ingress-hostnames",
Name: fmt.Sprintf("%s-%d", tnt.GetName(), i),
},
Spec: v1alpha1.TenantSpec{
Owner: v1alpha1.OwnerSpec{
@@ -70,7 +71,9 @@ var _ = Describe("when a second Tenant contains an already declared allowed Ingr
},
},
}
Expect(k8sClient.Delete(context.TODO(), tnt2)).ShouldNot(Succeed())
EventuallyCreation(func() error {
return k8sClient.Create(context.TODO(), tnt2)
}).ShouldNot(Succeed())
}
})
})

View File

@@ -22,7 +22,6 @@ import (
"context"
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -186,7 +185,7 @@ var _ = Describe("creating namespaces within a Tenant with resources", func() {
lr := &corev1.LimitRange{}
Eventually(func() error {
return k8sClient.Get(context.TODO(), types.NamespacedName{Name: n, Namespace: name}, lr)
}, 10*time.Second, time.Second).Should(Succeed())
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
Expect(lr.Spec).Should(Equal(s))
}
})
@@ -196,7 +195,7 @@ var _ = Describe("creating namespaces within a Tenant with resources", func() {
np := &networkingv1.NetworkPolicy{}
Eventually(func() error {
return k8sClient.Get(context.TODO(), types.NamespacedName{Name: n, Namespace: name}, np)
}, 10*time.Second, time.Second).Should(Succeed())
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
Expect(np.Spec).Should(Equal(s))
}
})
@@ -209,7 +208,7 @@ var _ = Describe("creating namespaces within a Tenant with resources", func() {
ns := &corev1.Namespace{}
Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: name}, ns)).Should(Succeed())
return ns.GetAnnotations()["scheduler.alpha.kubernetes.io/node-selector"]
}, 10*time.Second, time.Second).Should(Equal(strings.Join(selector, ",")))
}, defaultTimeoutInterval, defaultPollInterval).Should(Equal(strings.Join(selector, ",")))
})
By("checking the Resource Quota", func() {
for i, s := range tnt.Spec.ResourceQuota {
@@ -217,7 +216,7 @@ var _ = Describe("creating namespaces within a Tenant with resources", func() {
rq := &corev1.ResourceQuota{}
Eventually(func() error {
return k8sClient.Get(context.TODO(), types.NamespacedName{Name: n, Namespace: name}, rq)
}, 10*time.Second, time.Second).Should(Succeed())
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
Expect(rq.Spec).Should(Equal(s))
}
})

1
go.mod
View File

@@ -7,6 +7,7 @@ require (
github.com/hashicorp/go-multierror v1.1.0
github.com/onsi/ginkgo v1.14.1
github.com/onsi/gomega v1.10.2
github.com/pkg/errors v0.9.1
github.com/stretchr/testify v1.5.1
go.uber.org/zap v1.15.0
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e

89
hack/create-user-openshift.sh Executable file
View File

@@ -0,0 +1,89 @@
#!/bin/bash

# This script uses a Kubernetes CertificateSigningRequest (CSR) to generate a
# certificate signed by the Kubernetes CA itself.
# It requires cluster admin permission.
#
# e.g.: ./create-user-openshift.sh alice oil
# where `oil` is the Tenant and `alice` the owner

# Check if OpenSSL is installed
if [[ ! -x "$(command -v openssl)" ]]; then
  echo "Error: openssl not found"
  exit 1
fi

# Check if kubectl is installed
if [[ ! -x "$(command -v kubectl)" ]]; then
  echo "Error: kubectl not found"
  exit 1
fi

# Check if oc is installed
# (fix: this branch previously reported "kubectl not found" when `oc` was the
# missing binary)
if [[ ! -x "$(command -v oc)" ]]; then
  echo "Error: oc not found"
  exit 1
fi

USER=$1
TENANT=$2

if [[ -z ${USER} ]]; then
  echo "User has not been specified!"
  exit 1
fi

if [[ -z ${TENANT} ]]; then
  echo "Tenant has not been specified!"
  exit 1
fi

GROUP=capsule.clastix.io
TMPDIR=$(mktemp -d)
echo "creating certs in TMPDIR ${TMPDIR} "

# Generate the private key and the CSR for CN=<user>, O=capsule.clastix.io
openssl genrsa -out "${TMPDIR}/tls.key" 2048
openssl req -new -key "${TMPDIR}/tls.key" -subj "/CN=${USER}/O=${GROUP}" -out "${TMPDIR}/${USER}-${TENANT}.csr"

# Clean any previously created CSR for the same user.
kubectl delete csr "${USER}-${TENANT}" 2>/dev/null || true

# Create a new CSR file.
cat <<EOF > "${TMPDIR}/${USER}-${TENANT}-csr.yaml"
apiVersion: certificates.k8s.io/v1beta1
kind: CertificateSigningRequest
metadata:
  name: ${USER}-${TENANT}
spec:
  groups:
  - system:authenticated
  request: $(base64 < "${TMPDIR}/${USER}-${TENANT}.csr" | tr -d '\n')
  usages:
  - digital signature
  - key encipherment
  - client auth
EOF

# Create the CSR
kubectl apply -f "${TMPDIR}/${USER}-${TENANT}-csr.yaml"

# Approve and fetch the signed certificate.
# In OCP we must use `oc adm certificate approve`.
oc adm certificate approve "${USER}-${TENANT}"
kubectl get csr "${USER}-${TENANT}" -o jsonpath='{.status.certificate}' | base64 --decode > "${TMPDIR}/tls.crt"

# Create the kubeconfig file.
# (fix: the closing brace of each jsonpath filter was misplaced — `\"})]`
# instead of `\")]` — making both expressions invalid)
CONTEXT=$(kubectl config current-context)
CLUSTER=$(kubectl config view -o jsonpath="{.contexts[?(@.name == \"${CONTEXT}\")].context.cluster}")
SERVER=$(kubectl config view -o jsonpath="{.clusters[?(@.name == \"${CLUSTER}\")].cluster.server}")

# create context for the new user
oc config set-credentials "${USER}" --client-certificate="${TMPDIR}/tls.crt" --client-key="${TMPDIR}/tls.key" --embed-certs --kubeconfig="${USER}-${TENANT}.kubeconfig"

# set current context for new user
# NOTE(review): the generated kubeconfig carries credentials and a context, but
# no cluster entry (no `set-cluster` call): confirm users are expected to merge
# it with an existing kubeconfig providing the cluster/server details.
oc config set-context "${USER}" --cluster="$(oc config view -o jsonpath='{.clusters[0].name}')" --namespace=default --user="${USER}" --kubeconfig="${USER}-${TENANT}.kubeconfig"

echo "kubeconfig file is:" "${USER}-${TENANT}.kubeconfig"
echo "to use it as ${USER}: 'oc config use-context ${USER} --kubeconfig=${USER}-${TENANT}.kubeconfig'"

View File

@@ -37,8 +37,8 @@ GROUP=capsule.clastix.io
TMPDIR=$(mktemp -d)
echo "creating certs in TMPDIR ${TMPDIR} "
openssl genrsa -out ${TMPDIR}/tls.key 2048
openssl req -new -key ${TMPDIR}/tls.key -subj "/CN=${USER}/O=${GROUP}" -out ${TMPDIR}/${USER}-${TENANT}.csr
openssl genrsa -out ${USER}-${TENANT}.key 2048
openssl req -new -key ${USER}-${TENANT}.key -subj "/CN=${USER}/O=${GROUP}" -out ${TMPDIR}/${USER}-${TENANT}.csr
# Clean any previously created CSR for the same user.
kubectl delete csr ${USER}-${TENANT} 2>/dev/null || true
@@ -64,7 +64,7 @@ kubectl apply -f ${TMPDIR}/${USER}-${TENANT}-csr.yaml
# Approve and fetch the signed certificate
kubectl certificate approve ${USER}-${TENANT}
kubectl get csr ${USER}-${TENANT} -o jsonpath='{.status.certificate}' | base64 --decode > ${TMPDIR}/tls.crt
kubectl get csr ${USER}-${TENANT} -o jsonpath='{.status.certificate}' | base64 --decode > ${USER}-${TENANT}.crt
# Create the kubeconfig file
CONTEXT=$(kubectl config current-context)
@@ -90,8 +90,8 @@ preferences: {}
users:
- name: ${USER}
user:
client-certificate-data: $(cat ${TMPDIR}/tls.crt | base64 | tr -d '\n')
client-key-data: $(cat ${TMPDIR}/tls.key | base64 | tr -d '\n')
client-certificate: ${USER}-${TENANT}.crt
client-key: ${USER}-${TENANT}.key
EOF
echo "kubeconfig file is:" ${USER}-${TENANT}.kubeconfig

15
main.go
View File

@@ -80,6 +80,8 @@ func main() {
var capsuleGroup string
var protectedNamespaceRegexpString string
var protectedNamespaceRegexp *regexp.Regexp
var allowTenantIngressHostnamesCollision bool
var allowIngressHostnamesCollision bool
var namespace string
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
@@ -92,6 +94,15 @@ func main() {
"during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash. "+
"This is useful to avoid Namespace name collision in a public CaaS environment.")
flag.StringVar(&protectedNamespaceRegexpString, "protected-namespace-regex", "", "Disallow creation of namespaces, whose name matches this regexp")
flag.BoolVar(
&allowTenantIngressHostnamesCollision,
"allow-tenant-ingress-hostnames-collision",
false,
"When defining the exact match for allowed Ingress hostnames at Tenant level, a collision is not allowed. "+
"Toggling this, Capsule will not check if a hostname collision is in place, allowing the creation of "+
"two or more Tenant resources although sharing the same allowed hostname(s).",
)
flag.BoolVar(&allowIngressHostnamesCollision, "allow-ingress-hostname-collision", true, "Allow the Ingress hostname collision at Ingress resource level across all the Tenants.")
opts := zap.Options{
EncoderConfigOptions: append([]zap.EncoderConfigOption{}, func(config *zapcore.EncoderConfig) {
@@ -157,7 +168,7 @@ func main() {
// webhooks: the order matters, don't change it and just append
wl := append(
make([]webhook.Webhook, 0),
ingress.Webhook(ingress.Handler()),
ingress.Webhook(ingress.Handler(allowIngressHostnamesCollision)),
pvc.Webhook(pvc.Handler()),
registry.Webhook(registry.Handler()),
services.Webhook(services.Handler()),
@@ -165,7 +176,7 @@ func main() {
namespacequota.Webhook(utils.InCapsuleGroup(capsuleGroup, namespacequota.Handler())),
networkpolicies.Webhook(utils.InCapsuleGroup(capsuleGroup, networkpolicies.Handler())),
tenantprefix.Webhook(utils.InCapsuleGroup(capsuleGroup, tenantprefix.Handler(forceTenantPrefix, protectedNamespaceRegexp))),
tenant.Webhook(tenant.Handler()),
tenant.Webhook(tenant.Handler(allowTenantIngressHostnamesCollision)),
)
if err = webhook.Register(mgr, wl...); err != nil {
setupLog.Error(err, "unable to setup webhooks")

View File

@@ -0,0 +1,38 @@
/*
Copyright 2020 Clastix Labs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package indexer
import (
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
"github.com/clastix/capsule/pkg/indexer/ingress"
"github.com/clastix/capsule/pkg/webhook/utils"
)
// init registers the Ingress hostname field indexers (".spec.rules[*].host")
// for every Ingress API group/version handled by Capsule, so webhooks can list
// Ingress resources by hostname through a field selector.
func init() {
// extensions/v1beta1 is registered unconditionally.
AddToIndexerFuncs = append(AddToIndexerFuncs, ingress.Hostname{Obj: &extensionsv1beta1.Ingress{}})
// ingresses.networking.k8s.io/v1 introduced by 1.19
{
// NOTE(review): the error returned by GetK8sVersion is discarded; if the
// version lookup fails, the networking/v1 indexer is silently skipped —
// confirm this is the intended behavior.
majorVer, minorVer, _, _ := utils.GetK8sVersion()
if majorVer == 1 && minorVer >= 19 {
AddToIndexerFuncs = append(AddToIndexerFuncs, ingress.Hostname{Obj: &networkingv1.Ingress{}})
}
}
// networking.k8s.io/v1beta1 is registered unconditionally as well.
AddToIndexerFuncs = append(AddToIndexerFuncs, ingress.Hostname{Obj: &networkingv1beta1.Ingress{}})
}

View File

@@ -0,0 +1,64 @@
/*
Copyright 2020 Clastix Labs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ingress
import (
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// Hostname is a field indexer extracting every hostname declared by the rules
// of an Ingress resource, whichever supported API group it belongs to.
type Hostname struct {
	Obj metav1.Object
}

// Object returns the resource this indexer is registered for.
func (h Hostname) Object() client.Object {
	return h.Obj.(client.Object)
}

// Field returns the virtual field name under which the hostnames are indexed.
func (h Hostname) Field() string {
	return ".spec.rules[*].host"
}

// Func returns the IndexerFunc collecting all the .spec.rules[*].host entries
// of the indexed object; an unrecognized type yields no entries.
func (h Hostname) Func() client.IndexerFunc {
	return func(object client.Object) []string {
		var hostnames []string
		switch h.Obj.(type) {
		case *networkingv1.Ingress:
			for _, rule := range object.(*networkingv1.Ingress).Spec.Rules {
				hostnames = append(hostnames, rule.Host)
			}
		case *networkingv1beta1.Ingress:
			for _, rule := range object.(*networkingv1beta1.Ingress).Spec.Rules {
				hostnames = append(hostnames, rule.Host)
			}
		case *extensionsv1beta1.Ingress:
			for _, rule := range object.(*extensionsv1beta1.Ingress).Spec.Rules {
				hostnames = append(hostnames, rule.Host)
			}
		}
		return hostnames
	}
}

View File

@@ -20,23 +20,29 @@ import (
"sort"
)
type UserGroupList []string
func (u UserGroupList) Len() int {
return len(u)
type UserGroupList interface {
Find(needle string) (found bool)
}
func (u UserGroupList) Less(i, j int) bool {
return u[i] < u[j]
// userGroupList is the []string-backed implementation of UserGroupList.
type userGroupList []string

// NewUserGroupList builds a UserGroupList from a sorted copy of the given
// group names, so membership lookups can rely on binary search.
func NewUserGroupList(groups []string) UserGroupList {
	list := make(userGroupList, len(groups))
	copy(list, groups)
	sort.Strings(list)
	return list
}
func (u UserGroupList) Swap(i, j int) {
u[i], u[j] = u[j], u[i]
}
func (u UserGroupList) IsInCapsuleGroup(capsuleGroup string) (ok bool) {
sort.Sort(u)
i := sort.SearchStrings(u, capsuleGroup)
ok = i < u.Len() && u[i] == capsuleGroup
// Find reports whether needle is present in the list: it sorts the receiver in
// place and then performs a binary search against it.
func (u userGroupList) Find(needle string) (found bool) {
	// Fix: the previous comparator returned `i < j`, comparing the indexes
	// instead of the values — the slice was never actually sorted by value,
	// which makes the binary search below unreliable.
	sort.SliceStable(u, func(i, j int) bool {
		return u[i] < u[j]
	})
	i := sort.SearchStrings(u, needle)
	found = i < len(u) && u[i] == needle
	return
}

View File

@@ -19,5 +19,5 @@ func TestIsInCapsuleGroup(t *testing.T) {
capsuleGroup := "kubernetes-abilitytologin"
assert.True(t, UserGroupList(groups).IsInCapsuleGroup(capsuleGroup), nil)
assert.True(t, NewUserGroupList(groups).Find(capsuleGroup), nil)
}

View File

@@ -45,6 +45,18 @@ type ingressHostnameNotValid struct {
spec v1alpha1.AllowedListSpec
}
// collisionMessageFmt is the user-facing message emitted on hostname collision.
const collisionMessageFmt = "hostname %s is already used across the cluster: please, reach out to the system administrators"

// ingressHostnameCollision signals that the hostname declared by an Ingress is
// already claimed by another Ingress resource in the cluster.
type ingressHostnameCollision struct {
	hostname string
}

// Error implements the error interface.
func (e ingressHostnameCollision) Error() string {
	return fmt.Sprintf(collisionMessageFmt, e.hostname)
}

// NewIngressHostnameCollision builds the collision error for the given hostname.
func NewIngressHostnameCollision(hostname string) error {
	return ingressHostnameCollision{hostname: hostname}
}
// NewIngressHostnamesNotValid builds the error returned when Ingress hostnames
// are denied: invalidHostnames are those missing from the exact allowed list,
// notMatchingHostnames are those not matching the allowed regex, and spec is
// the Tenant allow-list reported back to the user.
func NewIngressHostnamesNotValid(invalidHostnames []string, notMatchingHostnames []string, spec v1alpha1.AllowedListSpec) error {
return &ingressHostnameNotValid{invalidHostnames: invalidHostnames, notMatchingHostnames: notMatchingHostnames, spec: spec}
}

View File

@@ -29,6 +29,7 @@ const (
type Ingress interface {
IngressClass() *string
Namespace() string
Name() string
Hostnames() []string
}
@@ -36,6 +37,10 @@ type NetworkingV1 struct {
*networkingv1.Ingress
}
func (n NetworkingV1) Name() string {
return n.GetName()
}
func (n NetworkingV1) IngressClass() (res *string) {
res = n.Spec.IngressClassName
if res == nil {
@@ -65,6 +70,10 @@ type NetworkingV1Beta1 struct {
*networkingv1beta.Ingress
}
func (n NetworkingV1Beta1) Name() string {
return n.GetName()
}
func (n NetworkingV1Beta1) IngressClass() (res *string) {
res = n.Spec.IngressClassName
if res == nil {
@@ -94,6 +103,10 @@ type Extension struct {
*extensionsv1beta1.Ingress
}
func (e Extension) Name() string {
return e.GetName()
}
func (e Extension) IngressClass() (res *string) {
res = e.Spec.IngressClassName
if res == nil {

View File

@@ -22,7 +22,7 @@ import (
"net/http"
"regexp"
"github.com/go-logr/logr"
"github.com/pkg/errors"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
networkingv1 "k8s.io/api/networking/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
@@ -58,11 +58,11 @@ func (w *webhook) GetPath() string {
}
type handler struct {
Log logr.Logger
allowHostnamesCollision bool
}
func Handler() capsulewebhook.Handler {
return &handler{}
func Handler(allowIngressHostnamesCollision bool) capsulewebhook.Handler {
return &handler{allowHostnamesCollision: allowIngressHostnamesCollision}
}
func (r *handler) OnCreate(client client.Client, decoder *admission.Decoder) capsulewebhook.Func {
@@ -121,49 +121,40 @@ func (r *handler) ingressFromRequest(req admission.Request, decoder *admission.D
return
}
func (r *handler) validateIngress(ctx context.Context, c client.Client, ingress Ingress) admission.Response {
func (r *handler) validateClass(tenant v1alpha1.Tenant, ingressClass *string) error {
if tenant.Spec.IngressClasses == nil {
return nil
}
if ingressClass == nil {
return NewIngressClassNotValid(*tenant.Spec.IngressClasses)
}
var valid, matched bool
tl := &v1alpha1.TenantList{}
if err := c.List(ctx, tl, client.MatchingFieldsSelector{
Selector: fields.OneTermEqualSelector(".status.namespaces", ingress.Namespace()),
}); err != nil {
return admission.Errored(http.StatusBadRequest, err)
if len(tenant.Spec.IngressClasses.Exact) > 0 {
valid = tenant.Spec.IngressClasses.ExactMatch(*ingressClass)
}
if len(tl.Items) == 0 {
return admission.Allowed("")
}
tnt := tl.Items[0]
if tnt.Spec.IngressClasses == nil {
return admission.Allowed("")
}
ingressClass := ingress.IngressClass()
if ingressClass == nil {
return admission.Errored(http.StatusBadRequest, NewIngressClassNotValid(*tnt.Spec.IngressClasses))
}
if len(tnt.Spec.IngressClasses.Exact) > 0 {
valid = tnt.Spec.IngressClasses.ExactMatch(*ingressClass)
}
matched = tnt.Spec.IngressClasses.RegexMatch(*ingressClass)
matched = tenant.Spec.IngressClasses.RegexMatch(*ingressClass)
if !valid && !matched {
return admission.Errored(http.StatusBadRequest, NewIngressClassForbidden(*ingressClass, *tnt.Spec.IngressClasses))
return NewIngressClassForbidden(*ingressClass, *tenant.Spec.IngressClasses)
}
if tnt.Spec.IngressHostnames == nil {
return admission.Allowed("")
return nil
}
func (r *handler) validateHostnames(tenant v1alpha1.Tenant, hostnames []string) error {
if tenant.Spec.IngressHostnames == nil {
return nil
}
var valid, matched bool
var invalidHostnames []string
hostnames := ingress.Hostnames()
if len(hostnames) > 0 {
for _, currentHostname := range hostnames {
isPresent := v1alpha1.IngressHostnamesList(tnt.Spec.IngressHostnames.Exact).IsStringInList(currentHostname)
isPresent := v1alpha1.IngressHostnamesList(tenant.Spec.IngressHostnames.Exact).IsStringInList(currentHostname)
if !isPresent {
invalidHostnames = append(invalidHostnames, currentHostname)
}
@@ -174,10 +165,10 @@ func (r *handler) validateIngress(ctx context.Context, c client.Client, ingress
}
var notMatchingHostnames []string
allowedRegex := tnt.Spec.IngressHostnames.Regex
allowedRegex := tenant.Spec.IngressHostnames.Regex
if len(allowedRegex) > 0 {
for _, currentHostname := range hostnames {
matched, _ = regexp.MatchString(tnt.Spec.IngressHostnames.Regex, currentHostname)
matched, _ = regexp.MatchString(tenant.Spec.IngressHostnames.Regex, currentHostname)
if !matched {
notMatchingHostnames = append(notMatchingHostnames, currentHostname)
}
@@ -188,8 +179,106 @@ func (r *handler) validateIngress(ctx context.Context, c client.Client, ingress
}
if !valid && !matched {
return admission.Errored(http.StatusBadRequest, NewIngressHostnamesNotValid(invalidHostnames, notMatchingHostnames, *tnt.Spec.IngressHostnames))
return NewIngressHostnamesNotValid(invalidHostnames, notMatchingHostnames, *tenant.Spec.IngressHostnames)
}
return nil
}
// validateIngress runs the full admission chain for an Ingress: it resolves
// the Tenant owning the Ingress Namespace, then validates the ingress class,
// the allowed hostnames and, last, the cluster-wide hostname collisions.
// Ingresses in Namespaces not belonging to any Tenant are always allowed.
func (r *handler) validateIngress(ctx context.Context, c client.Client, ingress Ingress) admission.Response {
// Resolve the owning Tenant via the ".status.namespaces" field indexer.
tl := &v1alpha1.TenantList{}
if err := c.List(ctx, tl, client.MatchingFieldsSelector{
Selector: fields.OneTermEqualSelector(".status.namespaces", ingress.Namespace()),
}); err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
// Not a Tenant Namespace: nothing for Capsule to enforce.
if len(tl.Items) == 0 {
return admission.Allowed("")
}
tnt := tl.Items[0]
if err := r.validateClass(tnt, ingress.IngressClass()); err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
if err := r.validateHostnames(tnt, ingress.Hostnames()); err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
if err := r.validateCollision(ctx, c, ingress); err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
return admission.Allowed("")
}
// validateCollision denies an Ingress whose hostnames are already claimed by a
// different Ingress resource anywhere in the cluster, relying on the
// ".spec.rules[*].host" field indexer. The check is skipped entirely when the
// --allow-ingress-hostname-collision flag is enabled.
//
// Refactor: the three per-API-group branches shared the same accounting logic
// and are now folded into one; the extensions branch also gains the same error
// wrapping the networking branches already had.
func (r *handler) validateCollision(ctx context.Context, clt client.Client, ingress Ingress) error {
	if r.allowHostnamesCollision {
		return nil
	}

	for _, hostname := range ingress.Hostnames() {
		selector := client.MatchingFieldsSelector{
			Selector: fields.OneTermEqualSelector(".spec.rules[*].host", hostname),
		}

		// found counts the Ingress resources already indexed for this hostname;
		// the identity of the first match lets the object under review match
		// itself (an UPDATE re-submits the same Ingress).
		var found int
		var firstName, firstNamespace string

		switch ingress.(type) {
		case Extension:
			el := &extensionsv1beta1.IngressList{}
			if err := clt.List(ctx, el, selector); err != nil {
				return errors.Wrap(err, "cannot list *extensionsv1beta1.IngressList by MatchingFieldsSelector")
			}
			if found = len(el.Items); found > 0 {
				firstName, firstNamespace = el.Items[0].GetName(), el.Items[0].GetNamespace()
			}
		case NetworkingV1:
			nl := &networkingv1.IngressList{}
			if err := clt.List(ctx, nl, selector); err != nil {
				return errors.Wrap(err, "cannot list *networkingv1.IngressList by MatchingFieldsSelector")
			}
			if found = len(nl.Items); found > 0 {
				firstName, firstNamespace = nl.Items[0].GetName(), nl.Items[0].GetNamespace()
			}
		case NetworkingV1Beta1:
			nlb := &networkingv1beta1.IngressList{}
			if err := clt.List(ctx, nlb, selector); err != nil {
				return errors.Wrap(err, "cannot list *networkingv1beta1.IngressList by MatchingFieldsSelector")
			}
			if found = len(nlb.Items); found > 0 {
				firstName, firstNamespace = nlb.Items[0].GetName(), nlb.Items[0].GetNamespace()
			}
		}

		// A single match that is the very object under validation is not a
		// collision; any other non-zero outcome is.
		if found > 1 || (found == 1 && (firstName != ingress.Name() || firstNamespace != ingress.Namespace())) {
			return NewIngressHostnameCollision(hostname)
		}
	}

	return nil
}

View File

@@ -23,5 +23,5 @@ func NewNamespaceQuotaExceededError() error {
}
func (namespaceQuotaExceededError) Error() string {
return "Cannot exceed Namespace quota: please, reach out the system administrators"
return "Cannot exceed Namespace quota: please, reach out to the system administrators"
}

View File

@@ -82,7 +82,7 @@ func (r *handler) OnDelete(client client.Client, decoder *admission.Decoder) cap
return admission.Errored(http.StatusInternalServerError, err)
}
if ok {
return admission.Denied("Capsule Network Policies cannot be deleted: please, reach out the system administrators")
return admission.Denied("Capsule Network Policies cannot be deleted: please, reach out to the system administrators")
}
return admission.Allowed("")
@@ -102,7 +102,7 @@ func (r *handler) OnUpdate(client client.Client, decoder *admission.Decoder) cap
return admission.Errored(http.StatusInternalServerError, err)
}
if ok {
return admission.Denied("Capsule Network Policies cannot be updated: please, reach out the system administrators")
return admission.Denied("Capsule Network Policies cannot be updated: please, reach out to the system administrators")
}
return admission.Allowed("")

View File

@@ -122,7 +122,7 @@ func (h *handler) OnCreate(clt client.Client, decoder *admission.Decoder) capsul
sort.Sort(sort.Reverse(tenants))
if len(tenants) == 0 {
return admission.Denied("You do not have any Tenant assigned: please, reach out the system administrators")
return admission.Denied("You do not have any Tenant assigned: please, reach out to the system administrators")
}
if len(tenants) == 1 {

View File

@@ -19,7 +19,6 @@ package tenant
import (
"context"
"fmt"
"net/http"
"regexp"
"k8s.io/apimachinery/pkg/fields"
@@ -30,7 +29,7 @@ import (
capsulewebhook "github.com/clastix/capsule/pkg/webhook"
)
// +kubebuilder:webhook:path=/validating-v1-tenant,mutating=false,failurePolicy=fail,groups="capsule.clastix.io",resources=tenants,verbs=create,versions=v1alpha1,name=tenant.capsule.clastix.io
// +kubebuilder:webhook:path=/validating-v1-tenant,mutating=false,failurePolicy=fail,groups="capsule.clastix.io",resources=tenants,verbs=create;update,versions=v1alpha1,name=tenant.capsule.clastix.io
type webhook struct {
handler capsulewebhook.Handler
@@ -53,83 +52,139 @@ func (w webhook) GetHandler() capsulewebhook.Handler {
}
type handler struct {
checkIngressHostnamesExact bool
}
func Handler() capsulewebhook.Handler {
return &handler{}
func Handler(allowTenantIngressHostnamesCollision bool) capsulewebhook.Handler {
return &handler{checkIngressHostnamesExact: !allowTenantIngressHostnamesCollision}
}
func (h *handler) OnCreate(clt client.Client, decoder *admission.Decoder) capsulewebhook.Func {
// validateTenantName rejects Tenant names that are not valid RFC 1123 DNS
// labels (lowercase alphanumerics and dashes, starting and ending with an
// alphanumeric character).
func (h *handler) validateTenantName(tenant *v1alpha1.Tenant) error {
	// Fix: the expression is now anchored with ^...$; the previous unanchored
	// form matched any name merely *containing* one valid character, so names
	// with forbidden characters slipped through.
	matched, _ := regexp.MatchString(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`, tenant.GetName())
	if !matched {
		return fmt.Errorf("tenant name has forbidden characters")
	}
	return nil
}
// checkAllowedRegex verifies that expr, when non-empty, is a compilable regular
// expression; field names the Tenant spec section reported in the error.
// It factors out the body previously copy-pasted across the four validators
// below (whose copy-pasted comments had also drifted: the ingressHostnames one
// wrongly said "containerRegistries").
func (h *handler) checkAllowedRegex(field, expr string) error {
	if len(expr) == 0 {
		return nil
	}
	if _, err := regexp.Compile(expr); err != nil {
		return fmt.Errorf("unable to compile %s allowedRegex", field)
	}
	return nil
}

// validateIngressClassesRegex validates the ingressClasses allowedRegex, if any.
func (h *handler) validateIngressClassesRegex(tenant *v1alpha1.Tenant) error {
	if tenant.Spec.IngressClasses == nil {
		return nil
	}
	return h.checkAllowedRegex("ingressClasses", tenant.Spec.IngressClasses.Regex)
}

// validateStorageClassesRegex validates the storageClasses allowedRegex, if any.
func (h *handler) validateStorageClassesRegex(tenant *v1alpha1.Tenant) error {
	if tenant.Spec.StorageClasses == nil {
		return nil
	}
	return h.checkAllowedRegex("storageClasses", tenant.Spec.StorageClasses.Regex)
}

// validateContainerRegistriesRegex validates the containerRegistries allowedRegex, if any.
func (h *handler) validateContainerRegistriesRegex(tenant *v1alpha1.Tenant) error {
	if tenant.Spec.ContainerRegistries == nil {
		return nil
	}
	return h.checkAllowedRegex("containerRegistries", tenant.Spec.ContainerRegistries.Regex)
}

// validateIngressHostnamesRegex validates the ingressHostnames allowedRegex, if any.
func (h *handler) validateIngressHostnamesRegex(tenant *v1alpha1.Tenant) error {
	if tenant.Spec.IngressHostnames == nil {
		return nil
	}
	return h.checkAllowedRegex("ingressHostnames", tenant.Spec.IngressHostnames.Regex)
}
// validateIngressHostnamesCollision checks, across all the existing Tenants,
// that none of the exact Ingress hostnames declared by the given Tenant is
// already claimed by a different Tenant. The check is a no-op when the
// --allow-tenant-ingress-hostnames-collision flag is enabled.
//
// Fixes: the ctx parameter was previously named `context`, shadowing the
// context package, and the loop variable was named `h`, shadowing the receiver.
func (h *handler) validateIngressHostnamesCollision(ctx context.Context, clt client.Client, tenant *v1alpha1.Tenant) error {
	if !h.checkIngressHostnamesExact || tenant.Spec.IngressHostnames == nil {
		return nil
	}
	for _, hostname := range tenant.Spec.IngressHostnames.Exact {
		// Look up the declarants of this hostname via the
		// ".spec.ingressHostnames" field indexer.
		tl := &v1alpha1.TenantList{}
		if err := clt.List(ctx, tl, client.MatchingFieldsSelector{
			Selector: fields.OneTermEqualSelector(".spec.ingressHostnames", hostname),
		}); err != nil {
			return fmt.Errorf("cannot retrieve Tenant list using .spec.ingressHostnames field selector: %w", err)
		}
		switch {
		case len(tl.Items) == 1 && tl.Items[0].GetName() == tenant.GetName():
			// The only declarant is this very Tenant (an update): not a collision.
			continue
		case len(tl.Items) > 0:
			return fmt.Errorf("the allowed hostname %s is already used by the Tenant %s, cannot proceed", hostname, tl.Items[0].GetName())
		}
	}
	return nil
}
// validateTenant decodes the Tenant carried by the admission request and runs
// the whole validation chain: name and regex fields first, then the Ingress
// hostnames collision check against the other Tenants.
func (h *handler) validateTenant(ctx context.Context, req admission.Request, client client.Client, decoder *admission.Decoder) error {
	tenant := &v1alpha1.Tenant{}
	if err := decoder.Decode(req, tenant); err != nil {
		return err
	}
	if err := h.validateTenantByRegex(tenant); err != nil {
		return err
	}
	return h.validateIngressHostnamesCollision(ctx, client, tenant)
}
// validateTenantByRegex runs every field-level validation that needs no API
// server round-trip: Tenant name plus each user-supplied allowed regex.
// The first failing check short-circuits the chain.
func (h *handler) validateTenantByRegex(tenant *v1alpha1.Tenant) error {
	checks := []func(*v1alpha1.Tenant) error{
		h.validateTenantName,
		h.validateIngressClassesRegex,
		h.validateStorageClassesRegex,
		h.validateContainerRegistriesRegex,
		h.validateIngressHostnamesRegex,
	}
	for _, check := range checks {
		if err := check(tenant); err != nil {
			return err
		}
	}
	return nil
}
func (h *handler) OnCreate(client client.Client, decoder *admission.Decoder) capsulewebhook.Func {
return func(ctx context.Context, req admission.Request) admission.Response {
tnt := &v1alpha1.Tenant{}
if err := decoder.Decode(req, tnt); err != nil {
return admission.Errored(http.StatusBadRequest, err)
if err := h.validateTenant(ctx, req, client, decoder); err != nil {
return admission.Denied(err.Error())
}
matched, _ := regexp.MatchString(`[a-z0-9]([-a-z0-9]*[a-z0-9])?`, tnt.GetName())
if !matched {
return admission.Denied("Tenant name has forbidden characters")
}
// Validate ingressClasses regexp
if tnt.Spec.IngressClasses != nil && len(tnt.Spec.IngressClasses.Regex) > 0 {
if _, err := regexp.Compile(tnt.Spec.IngressClasses.Regex); err != nil {
return admission.Denied("Unable to compile ingressClasses allowedRegex")
}
}
// Validate storageClasses regexp
if tnt.Spec.StorageClasses != nil && len(tnt.Spec.StorageClasses.Regex) > 0 {
if _, err := regexp.Compile(tnt.Spec.StorageClasses.Regex); err != nil {
return admission.Denied("Unable to compile storageClasses allowedRegex")
}
}
// Validate containerRegistries regexp
if tnt.Spec.ContainerRegistries != nil && len(tnt.Spec.ContainerRegistries.Regex) > 0 {
if _, err := regexp.Compile(tnt.Spec.ContainerRegistries.Regex); err != nil {
return admission.Denied("Unable to compile containerRegistries allowedRegex")
}
}
// Validate ingressHostnames regexp
if tnt.Spec.IngressHostnames != nil && len(tnt.Spec.IngressHostnames.Regex) > 0 {
if _, err := regexp.Compile(tnt.Spec.IngressHostnames.Regex); err != nil {
return admission.Denied("Unable to compile ingressHostnames allowedRegex")
}
}
if tnt.Spec.IngressHostnames != nil && len(tnt.Spec.IngressHostnames.Exact) > 0 {
for _, h := range tnt.Spec.IngressHostnames.Exact {
tl := &v1alpha1.TenantList{}
err := clt.List(ctx, tl, client.MatchingFieldsSelector{
Selector: fields.OneTermEqualSelector(".spec.ingressHostnames", h),
})
if err != nil {
return admission.Errored(http.StatusBadRequest, err)
}
if len(tl.Items) > 0 {
return admission.Denied(fmt.Sprintf("The allowed hostname %s is already used by the Tenant %s, cannot proceed", h, tl.Items[0].GetName()))
}
}
if _, err := regexp.Compile(tnt.Spec.IngressHostnames.Regex); err != nil {
return admission.Denied("Unable to compile ingressHostnames allowedRegex")
}
}
return admission.Allowed("")
}
}
func (h *handler) OnDelete(client client.Client, decoder *admission.Decoder) capsulewebhook.Func {
return func(ctx context.Context, req admission.Request) admission.Response {
func (h *handler) OnDelete(client.Client, *admission.Decoder) capsulewebhook.Func {
return func(context.Context, admission.Request) admission.Response {
return admission.Allowed("")
}
}
// OnUpdate returns the admission Func validating Tenant updates: the same
// validation chain used on creation is applied, and any failure denies the
// request with the validation error as reason.
func (h *handler) OnUpdate(client client.Client, decoder *admission.Decoder) capsulewebhook.Func {
	return func(ctx context.Context, req admission.Request) admission.Response {
		err := h.validateTenant(ctx, req, client, decoder)
		if err == nil {
			return admission.Allowed("")
		}
		return admission.Denied(err.Error())
	}
}

View File

@@ -76,7 +76,7 @@ func (r *handler) OnCreate(clt client.Client, decoder *admission.Decoder) capsul
}
if r.protectedNamespacesRegex != nil {
if matched := r.protectedNamespacesRegex.MatchString(ns.GetName()); matched {
return admission.Denied("Creating namespaces with name matching " + r.protectedNamespacesRegex.String() + " regexp is not allowed; please, reach out the system administrators")
return admission.Denied("Creating namespaces with name matching " + r.protectedNamespacesRegex.String() + " regexp is not allowed; please, reach out to the system administrators")
}
}

View File

@@ -40,7 +40,14 @@ type handler struct {
// If the user performing action is not a Capsule user, can be skipped.
//
// The stale pre-refactor `return utils.UserGroupList(...)` line left over
// from the diff has been removed: it made the rest of the body unreachable.
func (h handler) isCapsuleUser(req admission.Request) bool {
	g := utils.NewUserGroupList(req.UserInfo.Groups)
	// if the user is a ServiceAccount belonging to the kube-system namespace, definitely, it's not a Capsule user
	// and we can skip the check in case of Capsule user group assigned to system:authenticated
	// (ref: https://github.com/clastix/capsule/issues/234)
	if g.Find("system:serviceaccounts:kube-system") {
		return false
	}
	return g.Find(h.capsuleGroup)
}
func (h *handler) OnCreate(client client.Client, decoder *admission.Decoder) webhook.Func {