Compare commits
34 Commits
0.2.0-rc1
...
v0.0.5-rc1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
16906db309 | ||
|
|
d25ed7f2df | ||
|
|
51f5bec5a6 | ||
|
|
d3f3f93a24 | ||
|
|
24bd363ee0 | ||
|
|
504241a948 | ||
|
|
d2700556dd | ||
|
|
89c66de7c6 | ||
|
|
a2109b74ef | ||
|
|
4dc92451ea | ||
|
|
46a7a0b917 | ||
|
|
1ed5d703e6 | ||
|
|
cb986384db | ||
|
|
49c8131eb5 | ||
|
|
82bbd238fb | ||
|
|
03eb6e633e | ||
|
|
6e24aad094 | ||
|
|
aa6881e32e | ||
|
|
98e441f1e9 | ||
|
|
007bdff512 | ||
|
|
a3c77b3531 | ||
|
|
3e38884a6c | ||
|
|
40130696bb | ||
|
|
12a8c469e8 | ||
|
|
27cdd84b3b | ||
|
|
f6fd0cfe3f | ||
|
|
0641350575 | ||
|
|
5aed7a01d5 | ||
|
|
8442eef72b | ||
|
|
d3bc9f4870 | ||
|
|
6541f19b67 | ||
|
|
45709f7bd3 | ||
|
|
2d628e1cd0 | ||
|
|
ea599ba6e6 |
1
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -40,4 +40,5 @@ you'd get this by running `kubectl -n capsule-system logs deploy/capsule-control
|
||||
# Additional context
|
||||
|
||||
- Capsule version: (`capsule --version`)
|
||||
- Helm Chart version: (`helm list -n capsule-system`)
|
||||
- Kubernetes version: (`kubectl version`)
|
||||
|
||||
45
.github/workflows/ci.yml
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
jobs:
|
||||
golangci:
|
||||
name: lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Run golangci-lint
|
||||
uses: golangci/golangci-lint-action@v2.3.0
|
||||
with:
|
||||
version: latest
|
||||
only-new-issues: false
|
||||
args: --timeout 2m --config .golangci.yml
|
||||
diff:
|
||||
name: diff
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Cache Go modules
|
||||
uses: actions/cache@v1
|
||||
env:
|
||||
cache-name: go-mod
|
||||
with:
|
||||
path: |
|
||||
~/go/pkg/mod
|
||||
/home/runner/work/capsule/capsule
|
||||
key: ${{ runner.os }}-build-${{ env.cache-name }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-build-
|
||||
${{ runner.os }}-
|
||||
- run: make manifests
|
||||
- name: Checking if manifests are disaligned
|
||||
run: test -z "$(git diff 2> /dev/null)"
|
||||
- name: Checking if manifests generated untracked files
|
||||
run: test -z "$(git ls-files --others --exclude-standard 2> /dev/null)"
|
||||
- run: make fmt vet
|
||||
- name: Checking if source code is not formatted
|
||||
run: test -z "$(git diff 2> /dev/null)"
|
||||
50
.github/workflows/e2e.yml
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
name: e2e
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
jobs:
|
||||
kind:
|
||||
name: Kubernetes
|
||||
strategy:
|
||||
matrix:
|
||||
k8s-version: ['v1.16.15', 'v1.17.11', 'v1.18.8', 'v1.19.4', 'v1.20.0']
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Cache Go modules and Docker images
|
||||
uses: actions/cache@v1
|
||||
env:
|
||||
cache-name: gomod-docker
|
||||
with:
|
||||
path: |
|
||||
~/go/pkg/mod
|
||||
/var/lib/docker
|
||||
/home/runner/work/capsule/capsule
|
||||
key: ${{ matrix.k8s-version }}-build-${{ env.cache-name }}
|
||||
restore-keys: |
|
||||
${{ matrix.k8s-version }}-build-
|
||||
${{ matrix.k8s-version }}-
|
||||
- run: make manifests
|
||||
- name: Checking if manifests are disaligned
|
||||
run: test -z "$(git diff 2> /dev/null)"
|
||||
- name: Checking if manifests generated untracked files
|
||||
run: test -z "$(git ls-files --others --exclude-standard 2> /dev/null)"
|
||||
- name: Installing Ginkgo
|
||||
run: go get github.com/onsi/ginkgo/ginkgo
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '^1.13.8'
|
||||
- uses: engineerd/setup-kind@v0.5.0
|
||||
with:
|
||||
skipClusterCreation: true
|
||||
- uses: azure/setup-helm@v1
|
||||
with:
|
||||
version: 3.3.4
|
||||
- name: e2e testing
|
||||
run: make e2e/${{ matrix.k8s-version }}
|
||||
35
.github/workflows/helm.yml
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
name: Helm Chart
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: azure/setup-helm@v1
|
||||
with:
|
||||
version: 3.3.4
|
||||
- name: Linting Chart
|
||||
run: helm lint ./charts/capsule
|
||||
release:
|
||||
if: github.ref == 'refs/heads/master'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Publish Helm chart
|
||||
uses: stefanprodan/helm-gh-pages@master
|
||||
with:
|
||||
token: ${{ secrets.BOT_GITHUB_TOKEN }}
|
||||
charts_dir: charts
|
||||
charts_url: https://clastix.github.io/charts
|
||||
owner: clastix
|
||||
repository: charts
|
||||
branch: gh-pages
|
||||
target_dir: .
|
||||
commit_username: prometherion
|
||||
commit_email: dario@tranchitella.eu
|
||||
61
.github/workflows/main.yml
vendored
@@ -1,61 +0,0 @@
|
||||
# This is a basic workflow to help you get started with Actions
|
||||
|
||||
name: CI
|
||||
|
||||
# Controls when the action will run. Triggers the workflow on push or pull request
|
||||
# events but only for the master branch
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
|
||||
jobs:
|
||||
# This workflow contains a single job called "build"
|
||||
golangci:
|
||||
name: lint
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Run golangci-lint
|
||||
uses: golangci/golangci-lint-action@v2.2.0
|
||||
with:
|
||||
# version of golangci-lint to use in form of v1.2.3
|
||||
version: latest
|
||||
# if set to true and the action runs on a pull request - the action outputs only newly found issues
|
||||
only-new-issues: false
|
||||
args: --timeout 2m
|
||||
kind:
|
||||
name: e2e
|
||||
strategy:
|
||||
matrix:
|
||||
k8s-version: ['v1.16.15', 'v1.17.11', 'v1.18.8', 'v1.19.1']
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
- name: Cache Go modules and Docker images
|
||||
uses: actions/cache@v1
|
||||
env:
|
||||
cache-name: gomod-docker
|
||||
with:
|
||||
path: |
|
||||
~/go/pkg/mod
|
||||
/var/lib/docker
|
||||
/home/runner/work/capsule/capsule
|
||||
key: ${{ matrix.k8s-version }}-build-${{ env.cache-name }}
|
||||
restore-keys: |
|
||||
${{ matrix.k8s-version }}-build-
|
||||
${{ matrix.k8s-version }}-
|
||||
- name: Removing kustomize
|
||||
run: sudo snap remove kustomize && sudo rm -rf $(which kustomize)
|
||||
- name: Installing Ginkgo
|
||||
run: go get github.com/onsi/ginkgo/ginkgo
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '^1.13.8'
|
||||
- uses: engineerd/setup-kind@v0.4.0
|
||||
with:
|
||||
skipClusterCreation: true
|
||||
- name: e2e testing
|
||||
run: make e2e/${{ matrix.k8s-version }}
|
||||
4
.gitignore
vendored
@@ -23,4 +23,6 @@ bin
|
||||
*.swo
|
||||
*~
|
||||
|
||||
hack/*.kubeconfig
|
||||
**/*.kubeconfig
|
||||
.DS_Store
|
||||
|
||||
|
||||
59
.golangci.yml
Normal file
@@ -0,0 +1,59 @@
|
||||
linters-settings:
|
||||
govet:
|
||||
check-shadowing: true
|
||||
golint:
|
||||
min-confidence: 0
|
||||
maligned:
|
||||
suggest-new: true
|
||||
goimports:
|
||||
local-prefixes: github.com/clastix/capsule
|
||||
dupl:
|
||||
threshold: 100
|
||||
goconst:
|
||||
min-len: 2
|
||||
min-occurrences: 2
|
||||
linters:
|
||||
disable-all: true
|
||||
enable:
|
||||
- bodyclose
|
||||
- deadcode
|
||||
- depguard
|
||||
- dogsled
|
||||
- dupl
|
||||
- errcheck
|
||||
- goconst
|
||||
- gocritic
|
||||
- gofmt
|
||||
- goimports
|
||||
- golint
|
||||
- goprintffuncname
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- interfacer
|
||||
- misspell
|
||||
- nolintlint
|
||||
- rowserrcheck
|
||||
- scopelint
|
||||
- staticcheck
|
||||
- structcheck
|
||||
- stylecheck
|
||||
- typecheck
|
||||
- unconvert
|
||||
- unparam
|
||||
- unused
|
||||
- varcheck
|
||||
- whitespace
|
||||
- maligned
|
||||
|
||||
issues:
|
||||
exclude:
|
||||
- Using the variable on range scope .* in function literal
|
||||
|
||||
service:
|
||||
golangci-lint-version: 1.33.x
|
||||
|
||||
run:
|
||||
skip-files:
|
||||
- "zz_.*\\.go$"
|
||||
18
Dockerfile
@@ -11,15 +11,25 @@ RUN go mod download
|
||||
|
||||
# Copy the go source
|
||||
COPY main.go main.go
|
||||
COPY version.go version.go
|
||||
COPY api/ api/
|
||||
COPY controllers/ controllers/
|
||||
COPY pkg/ pkg/
|
||||
COPY version/ version/
|
||||
|
||||
ARG VERSION
|
||||
COPY .git .git
|
||||
|
||||
# Build
|
||||
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -ldflags "-X github.com/clastix/capsule/version.Version=${VERSION}" -o manager main.go
|
||||
RUN git config --get remote.origin.url > /tmp/GIT_REPO && \
|
||||
git rev-parse --short HEAD > /tmp/GIT_HEAD_COMMIT && \
|
||||
git describe --abbrev=0 --tags > /tmp/GIT_LAST_TAG && \
|
||||
git rev-parse --short $(cat /tmp/GIT_LAST_TAG) > /tmp/GIT_TAG_COMMIT && \
|
||||
git diff $(cat /tmp/GIT_HEAD_COMMIT) $(cat /tmp/GIT_TAG_COMMIT) --quiet > /tmp/GIT_MODIFIED1 || echo '.dev' > /tmp/GIT_MODIFIED1 && \
|
||||
git diff --quiet > /tmp/GIT_MODIFIED2 || echo '.dirty' > /tmp/GIT_MODIFIED2 && \
|
||||
cat /tmp/GIT_MODIFIED1 /tmp/GIT_MODIFIED2 | tr -d '\n' > /tmp/GIT_MODIFIED && \
|
||||
date '+%Y-%m-%dT%H:%M:%S' > /tmp/BUILD_DATE &&\
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build \
|
||||
-gcflags "-N -l" \
|
||||
-ldflags "-X main.GitRepo=$(cat /tmp/GIT_REPO) -X main.GitTag=$(cat /tmp/GIT_LAST_TAG) -X main.GitCommit=$(cat /tmp/GIT_HEAD_COMMIT) -X main.GitDirty=$(cat /tmp/GIT_MODIFIED) -X main.BuildTime=$(cat /tmp/BUILD_DATE)" \
|
||||
-o manager
|
||||
|
||||
# Use distroless as minimal base image to package the manager binary
|
||||
# Refer to https://github.com/GoogleContainerTools/distroless for more details
|
||||
|
||||
12
Makefile
@@ -1,5 +1,5 @@
|
||||
# Current Operator version
|
||||
VERSION ?= 0.0.1
|
||||
VERSION ?= $$(git describe --abbrev=0 --tags)
|
||||
|
||||
# Default bundle image tag
|
||||
BUNDLE_IMG ?= quay.io/clastix/capsule:$(VERSION)-bundle
|
||||
@@ -15,7 +15,7 @@ BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
|
||||
# Image URL to use all building/pushing image targets
|
||||
IMG ?= quay.io/clastix/capsule:$(VERSION)
|
||||
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
|
||||
CRD_OPTIONS ?= "crd:trivialVersions=true"
|
||||
CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false"
|
||||
|
||||
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
|
||||
ifeq (,$(shell go env GOBIN))
|
||||
@@ -75,7 +75,7 @@ generate: controller-gen
|
||||
|
||||
# Build the docker image
|
||||
docker-build: test
|
||||
docker build . --build-arg=VERSION=${VERSION} -t ${IMG}
|
||||
docker build . -t ${IMG}
|
||||
|
||||
# Push the docker image
|
||||
docker-push:
|
||||
@@ -131,7 +131,7 @@ goimports:
|
||||
# Linting code as PR is expecting
|
||||
.PHONY: golint
|
||||
golint:
|
||||
golangci-lint run
|
||||
golangci-lint run -c .golangci.yml
|
||||
|
||||
# Running e2e tests in a KinD instance
|
||||
.PHONY: e2e
|
||||
@@ -139,7 +139,7 @@ e2e/%:
|
||||
kind create cluster --name capsule --image=kindest/node:$*
|
||||
make docker-build
|
||||
kind load docker-image --nodes capsule-control-plane --name capsule $(IMG)
|
||||
make deploy
|
||||
while [ -z $$(kubectl -n capsule-system get secret capsule-tls -o jsonpath='{.data.tls\.crt}') ]; do echo "waiting Capsule to be up and running..." && sleep 5; done
|
||||
kubectl create namespace capsule-system
|
||||
helm upgrade --install --namespace capsule-system capsule ./charts/capsule --set 'manager.image.pullPolicy=Never' --set 'manager.resources=null'
|
||||
ginkgo -v -tags e2e ./e2e
|
||||
kind delete cluster --name capsule
|
||||
|
||||
191
README.md
@@ -1,10 +1,5 @@
|
||||
# Capsule
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/logo/space-capsule3.png" />
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<p align="left">
|
||||
<img src="https://img.shields.io/github/license/clastix/capsule"/>
|
||||
<img src="https://img.shields.io/github/go-mod/go-version/clastix/capsule"/>
|
||||
<a href="https://github.com/clastix/capsule/releases">
|
||||
@@ -12,33 +7,58 @@
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<img src="assets/logo/capsule_medium.png" />
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
# Kubernetes multi-tenancy made simple
|
||||
**Capsule** helps to implement a multi-tenancy and policy-based environment in your Kubernetes cluster. It is not intended to be yet another _PaaS_, instead, it has been designed as a micro-services based ecosystem with minimalist approach, leveraging only on upstream Kubernetes.
|
||||
|
||||
# What's the problem with the current status?
|
||||
Kubernetes introduces the _Namespace_ object type to create logical partitions of the cluster as isolated *slices*. However, implementing advanced multi-tenancy scenarios, it becomes soon complicated because of the flat structure of Kubernetes namespaces and the impossibility to share resources among namespaces belonging to the same tenant. To overcome this, cluster admins tend to provision a dedicated cluster for each groups of users, teams, or departments. As an organization grows, the number of clusters to manage and keep aligned becomes an operational nightmare, described as the well know phenomena of the _clusters sprawl_.
|
||||
|
||||
# A multi-tenant operator for Kubernetes
|
||||
This project provides a custom operator for implementing a strong
|
||||
multi-tenant environment in _Kubernetes_. **Capsule** is not intended to be yet another _PaaS_, instead, it has been designed as a lightweight tool with a minimalist approach leveraging only the standard features of upstream Kubernetes.
|
||||
# Entering Caspule
|
||||
Capsule takes a different approach. In a single cluster, the Capsule Controller aggregates multiple namespaces in a lightweight abstraction called _Tenant_. Within each tenant, users are free to create their namespaces and share all the assigned resources while the Capsule Policy Engine keeps the different tenants isolated from each other.
|
||||
|
||||
# Which is the problem to solve?
|
||||
Kubernetes introduced the _namespace_ resource to create logical partitions of the
|
||||
cluster. A Kubernetes namespace creates a sort of isolated *slice* in the
|
||||
cluster: _Network and Security Policies_, _Resource Quota_, _Limit Ranges_, and
|
||||
_RBAC_ can be used to enforce isolation among different namespaces. Namespace isolation shines when Kubernetes is used to isolate the different environments or the different types of applications. Also, it works well to isolate applications serving different users when implementing the SaaS delivery model.
|
||||
The _Network and Security Policies_, _Resource Quota_, _Limit Ranges_, _RBAC_, and other policies defined at the tenant level are automatically inherited by all the namespaces in the tenant. And users are free to operate their tenants in authonomy, without the intervention of the cluster administrator. Take a look at following diagram:
|
||||
|
||||
However, implementing advanced multi-tenancy scenarios, for example, a private or public _Container-as-a-Service_ platform, it becomes soon complicated because of the flat structure of Kubernetes namespaces. In such scenarios, different groups of users get assigned a pool of namespaces with a limited amount of resources (e.g.: _nodes_, _vCPU_, _RAM_, _ephemeral and persistent storage_). When users need more namespaces or move resources from one namespace to another, they always need the intervention of the cluster admin because each namespace still works as an isolated environment. To work around this, and not being overwhelmed by continuous users' requests, cluster admins often choose to create multiple smaller clusters and assign a dedicated cluster to each organization or group of users leading to the well know and painful phenomena of the _clusters sprawl_.
|
||||
<p align="center" style="padding: 60px 20px">
|
||||
<img src="assets/capsule-operator.svg" />
|
||||
</p>
|
||||
|
||||
**Capsule** takes a different approach. It aggregates multiple namespaces assigned to an organization or group of users in a lightweight abstraction called _Tenant_. Within each tenant, users are free to create their namespaces and share all the assigned resources between the namespaces of the tenant. The _Network and Security Policies_, _Resource Quota_, _Limit Ranges_, _RBAC_, and other constraints defined at the tenant level are automatically inherited by all the namespaces in the tenant leaving the tenant's users to freely allocate resources without any intervention of the cluster administrator.
|
||||
# Features
|
||||
## Self-Service
|
||||
Leave to developers the freedom to self-provision their cluster resources according to the assigned boundaries.
|
||||
|
||||
# Use cases for Capsule
|
||||
Please, refer to the corresponding [section](use_cases.md) for a more detailed list of use cases that Capsule can address.
|
||||
## Preventing Clusters Sprawl
|
||||
Share a single cluster with multiple teams, groups of users, or departments by saving operational and management efforts.
|
||||
|
||||
## Governance
|
||||
Leverage Kubernetes Admission Controllers to enforce the industry security best practices and meet legal requirements.
|
||||
|
||||
## Resources Control
|
||||
Take control of the resources consumed by users while preventing them to overtake.
|
||||
|
||||
## Native Experience
|
||||
Provide multi-tenancy with a native Kubernetes experience without introducing additional management layers, plugins, or customised binaries.
|
||||
|
||||
## GitOps ready
|
||||
Capsule is completely declarative and GitOps ready.
|
||||
|
||||
## Bring your own device (BYOD)
|
||||
Assign to tenants a dedicated set of compute, storage, and network resources and avoid the noisy neighbors' effect.
|
||||
|
||||
# Common use cases for Capsule
|
||||
Please, refer to the corresponding [section](./docs/operator/use-cases/overview.md) in the project documentation for a detailed list of common use cases that Capsule can address.
|
||||
|
||||
# Installation
|
||||
Make sure you have access to a Kubernetes cluster as an administrator.
|
||||
Make sure you have access to a Kubernetes cluster as administrator.
|
||||
|
||||
There are two ways to install Capsule:
|
||||
|
||||
* Use the Helm Chart available [here](https://github.com/clastix/capsule-helm-chart)
|
||||
* Use the Helm Chart available [here](./charts/capsule/README.md)
|
||||
* Use [`kustomize`](https://github.com/kubernetes-sigs/kustomize)
|
||||
|
||||
## Install with kustomize
|
||||
@@ -47,52 +67,46 @@ Ensure you have `kubectl` and `kustomize` installed in your `PATH`.
|
||||
Clone this repository and move to the repo folder:
|
||||
|
||||
```
|
||||
make deploy
|
||||
# /home/prometherion/go/bin/controller-gen "crd:trivialVersions=true" rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
|
||||
# cd config/manager && /usr/local/bin/kustomize edit set image controller=quay.io/clastix/capsule:latest
|
||||
# /usr/local/bin/kustomize build config/default | kubectl apply -f -
|
||||
# namespace/capsule-system created
|
||||
# customresourcedefinition.apiextensions.k8s.io/tenants.capsule.clastix.io created
|
||||
# clusterrole.rbac.authorization.k8s.io/capsule-proxy-role created
|
||||
# clusterrole.rbac.authorization.k8s.io/capsule-metrics-reader created
|
||||
# clusterrolebinding.rbac.authorization.k8s.io/capsule-manager-rolebinding created
|
||||
# clusterrolebinding.rbac.authorization.k8s.io/capsule-proxy-rolebinding created
|
||||
# secret/capsule-ca created
|
||||
# secret/capsule-tls created
|
||||
# service/capsule-controller-manager-metrics-service created
|
||||
# service/capsule-webhook-service created
|
||||
# deployment.apps/capsule-controller-manager created
|
||||
# mutatingwebhookconfiguration.admissionregistration.k8s.io/capsule-mutating-webhook-configuration created
|
||||
# validatingwebhookconfiguration.admissionregistration.k8s.io/capsule-validating-webhook-configuration created
|
||||
$ git clone https://github.com/clastix/capsule
|
||||
$ cd capsule
|
||||
$ make deploy
|
||||
```
|
||||
|
||||
Log verbosity of the Capsule controller can be increased by passing the `--zap-log-level` option with a value from `1` to `10` or the [basic keywords](https://godoc.org/go.uber.org/zap/zapcore#Level) although it is suggested to use the `--zap-devel` flag to get also stack traces.
|
||||
It will install the Capsule controller in a dedicated namespace `capsule-system`.
|
||||
|
||||
During startup Capsule controller will create additional ClusterRoles `capsule-namespace-deleter`, `capsule-namespace-provisioner` and ClusterRoleBinding `capsule-namespace-provisioner`. These resources are used in order to allow Capsule users to manage their namespaces in tenants.
|
||||
## How to create Tenants
|
||||
Use the scaffold [Tenant](config/samples/capsule_v1alpha1_tenant.yaml) and simply apply as cluster admin.
|
||||
|
||||
You can disallow users to create namespaces matching a particular regexp by passing `--protected-namespace-regex` option with a value of regular expression.
|
||||
```
|
||||
$ kubectl apply -f config/samples/capsule_v1alpha1_tenant.yaml
|
||||
tenant.capsule.clastix.io/oil created
|
||||
```
|
||||
|
||||
## Admission Controllers
|
||||
Capsule implements Kubernetes multi-tenancy capabilities using a minimum set of standard [Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/) enabled on the Kubernetes APIs server: `--enable-admission-plugins=PodNodeSelector,LimitRanger,ResourceQuota,MutatingAdmissionWebhook,ValidatingAdmissionWebhook`. In addition to these default controllers, Capsule implements its own set of Admission Controllers through the [Dynamic Admission Controller](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/), providing callbacks to add further validation or resource patching.
|
||||
You can check the tenant just created as
|
||||
|
||||
All these requests must be served via HTTPS and a CA must be provided to ensure that
|
||||
the API Server is communicating with the right client. Capsule upon installation is setting its custom Certificate Authority as a client certificate as well, updating all the required resources to minimize the operational tasks.
|
||||
```
|
||||
$ kubectl get tenants
|
||||
NAME NAMESPACE QUOTA NAMESPACE COUNT OWNER NAME OWNER KIND NODE SELECTOR AGE
|
||||
oil 3 0 alice User 1m
|
||||
```
|
||||
|
||||
## Tenant users
|
||||
Each tenant comes with a delegated user acting as the tenant admin. In the Capsule jargon, this user is called the _Tenant Owner_. Other users can operate inside a tenant with different levels of permissions and authorizations assigned directly by the Tenant owner.
|
||||
## Tenant owners
|
||||
Each tenant comes with a delegated user or group of users acting as the tenant admin. In the Capsule jargon, this is called the _Tenant Owner_. Other users can operate inside a tenant with different levels of permissions and authorizations assigned directly by the Tenant Owner.
|
||||
|
||||
Capsule does not care about the authentication strategy used in the cluster and all the Kubernetes methods of [authentication](https://kubernetes.io/docs/reference/access-authn-authz/authentication/) are supported. The only requirement to use Capsule is to assign tenant users to the the group defined by `--capsule-user-group` option, which defaults to `capsule.clastix.io`.
|
||||
|
||||
Assignment to a group depends on the authentication strategy in your cluster. For example, if you are using `capsule.clastix.io` as your `--capsule-user-group`, users authenticated through a _X.509_ certificate must have `capsule.clastix.io` as _Organization_: `-subj "/CN=${USER}/O=capsule.clastix.io"`
|
||||
Assignment to a group depends on the authentication strategy in your cluster.
|
||||
|
||||
For example, if you are using `capsule.clastix.io`, users authenticated through a _X.509_ certificate must have `capsule.clastix.io` as _Organization_: `-subj "/CN=${USER}/O=capsule.clastix.io"`
|
||||
|
||||
Users authenticated through an _OIDC token_ must have
|
||||
|
||||
```json
|
||||
...
|
||||
"users_groups": [
|
||||
"/capsule.clastix.io",
|
||||
"capsule.clastix.io",
|
||||
"other_group"
|
||||
]
|
||||
]
|
||||
```
|
||||
|
||||
in their token.
|
||||
@@ -112,51 +126,62 @@ kubeconfig file is: alice-oil.kubeconfig
|
||||
to use it as alice export KUBECONFIG=alice-oil.kubeconfig
|
||||
```
|
||||
|
||||
## How to create a Tenant
|
||||
Use the [scaffold Tenant](config/samples/capsule_v1alpha1_tenant.yaml)
|
||||
and simply apply as Cluster Admin.
|
||||
## Working with Tenants
|
||||
Log in to the Kubernetes cluster as `alice` tenant owner
|
||||
|
||||
```
|
||||
kubectl apply -f config/samples/capsule_v1alpha1_tenant.yaml
|
||||
tenant.capsule.clastix.io/oil created
|
||||
$ export KUBECONFIG=alice-oil.kubeconfig
|
||||
```
|
||||
|
||||
The related Tenant owner `alice` can create Namespaces according to their assigned quota: happy Kubernetes cluster administration!
|
||||
and create a couple of new namespaces
|
||||
|
||||
```
|
||||
$ kubectl create namespace oil-production
|
||||
$ kubectl create namespace oil-development
|
||||
```
|
||||
|
||||
As user `alice` you can operate with fully admin permissions:
|
||||
|
||||
```
|
||||
$ kubectl -n oil-development run nginx --image=docker.io/nginx
|
||||
$ kubectl -n oil-development get pods
|
||||
```
|
||||
|
||||
but limited to only your own namespaces:
|
||||
|
||||
```
|
||||
$ kubectl -n kube-system get pods
|
||||
Error from server (Forbidden): pods is forbidden:
|
||||
User "alice" cannot list resource "pods" in API group "" in the namespace "kube-system"
|
||||
```
|
||||
|
||||
# Documentation
|
||||
Please, check the project [documentation](./docs/index.md) for more cool things you can do with Capsule.
|
||||
|
||||
# Removal
|
||||
Similar to `deploy`, you can get rid of Capsule using the `remove` target.
|
||||
|
||||
```
|
||||
make remove
|
||||
# /home/prometherion/go/bin/controller-gen "crd:trivialVersions=true" rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
|
||||
# /usr/local/bin/kustomize build config/default | kubectl delete -f -
|
||||
# namespace "capsule-system" deleted
|
||||
# customresourcedefinition.apiextensions.k8s.io "tenants.capsule.clastix.io" deleted
|
||||
# clusterrole.rbac.authorization.k8s.io "capsule-proxy-role" deleted
|
||||
# clusterrole.rbac.authorization.k8s.io "capsule-metrics-reader" deleted
|
||||
# clusterrolebinding.rbac.authorization.k8s.io "capsule-manager-rolebinding" deleted
|
||||
# clusterrolebinding.rbac.authorization.k8s.io "capsule-proxy-rolebinding" deleted
|
||||
# secret "capsule-ca" deleted
|
||||
# secret "capsule-tls" deleted
|
||||
# service "capsule-controller-manager-metrics-service" deleted
|
||||
# service "capsule-webhook-service" deleted
|
||||
# deployment.apps "capsule-controller-manager" deleted
|
||||
# mutatingwebhookconfiguration.admissionregistration.k8s.io "capsule-mutating-webhook-configuration" deleted
|
||||
# validatingwebhookconfiguration.admissionregistration.k8s.io "capsule-validating-webhook-configuration" deleted
|
||||
$ make remove
|
||||
```
|
||||
|
||||
# How to contribute
|
||||
Any contribution is welcome! Please refer to the corresponding [section](contributing.md).
|
||||
|
||||
# Production Grade
|
||||
|
||||
Although under frequent development and improvements, Capsule is ready to be used in production environments: check out the **Release** page for a detailed list of available versions.
|
||||
|
||||
# FAQ
|
||||
tbd
|
||||
- Q. How to pronunce Capsule?
|
||||
|
||||
# Changelog
|
||||
tbd
|
||||
A. It should be pronounced as `/ˈkæpsjuːl/`.
|
||||
|
||||
# Roadmap
|
||||
tbd
|
||||
- Q. Can I contribute?
|
||||
|
||||
A. Absolutely! Capsule is Open Source with Apache 2 license and any contribution is welcome. Please refer to the corresponding [section](./docs/operator/contributing.md) in the documentation.
|
||||
|
||||
- Q. Is it production grade?
|
||||
|
||||
A. Although under frequent development and improvements, Capsule is ready to be used in production environments as currently, people are using it in public and private deployments. Check out the [release](https://github.com/clastix/capsule/releases) page for a detailed list of available versions.
|
||||
|
||||
- Q. Does it work with my Kuberentes XYZ distribution?
|
||||
|
||||
A. We tested Capsule with vanilla Kubernetes 1.16+ on private envirnments and public clouds. We expect it works smootly on any other distribution. Please, let us know if you find it doesn't.
|
||||
|
||||
- Q. Do you provide commercial support?
|
||||
|
||||
A. Yes, we're available to help and provide commercial support. [Clastix](https://clastix.io) is the company behind Capsule. Please, contact us for a quote.
|
||||
|
||||
46
api/v1alpha1/allowed_list.go
Normal file
@@ -0,0 +1,46 @@
|
||||
/*
|
||||
Copyright 2020 Clastix Labs.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type AllowedListSpec struct {
|
||||
Exact []string `json:"allowed,omitempty"`
|
||||
Regex string `json:"allowedRegex,omitempty"`
|
||||
}
|
||||
|
||||
func (in *AllowedListSpec) ExactMatch(value string) (ok bool) {
|
||||
if len(in.Exact) > 0 {
|
||||
sort.SliceStable(in.Exact, func(i, j int) bool {
|
||||
return strings.ToLower(in.Exact[i]) < strings.ToLower(in.Exact[j])
|
||||
})
|
||||
i := sort.SearchStrings(in.Exact, value)
|
||||
ok = i < len(in.Exact) && in.Exact[i] == value
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (in AllowedListSpec) RegexMatch(value string) (ok bool) {
|
||||
if len(in.Regex) > 0 {
|
||||
ok = regexp.MustCompile(in.Regex).MatchString(value)
|
||||
}
|
||||
return
|
||||
}
|
||||
80
api/v1alpha1/allowed_list_test.go
Normal file
@@ -0,0 +1,80 @@
|
||||
/*
|
||||
Copyright 2020 Clastix Labs.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAllowedListSpec_ExactMatch(t *testing.T) {
|
||||
type tc struct {
|
||||
In []string
|
||||
True []string
|
||||
False []string
|
||||
}
|
||||
for _, tc := range []tc{
|
||||
{
|
||||
[]string{"foo", "bar", "bizz", "buzz"},
|
||||
[]string{"foo", "bar", "bizz", "buzz"},
|
||||
[]string{"bing", "bong"},
|
||||
},
|
||||
{
|
||||
[]string{"one", "two", "three"},
|
||||
[]string{"one", "two", "three"},
|
||||
[]string{"a", "b", "c"},
|
||||
},
|
||||
{
|
||||
nil,
|
||||
nil,
|
||||
[]string{"any", "value"},
|
||||
},
|
||||
} {
|
||||
a := AllowedListSpec{
|
||||
Exact: tc.In,
|
||||
}
|
||||
for _, ok := range tc.True {
|
||||
assert.True(t, a.ExactMatch(ok))
|
||||
}
|
||||
for _, ko := range tc.False {
|
||||
assert.False(t, a.ExactMatch(ko))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllowedListSpec_RegexMatch(t *testing.T) {
|
||||
type tc struct {
|
||||
Regex string
|
||||
True []string
|
||||
False []string
|
||||
}
|
||||
for _, tc := range []tc{
|
||||
{`first-\w+-pattern`, []string{"first-date-pattern", "first-year-pattern"}, []string{"broken", "first-year", "second-date-pattern"}},
|
||||
{``, nil, []string{"any", "value"}},
|
||||
} {
|
||||
a := AllowedListSpec{
|
||||
Regex: tc.Regex,
|
||||
}
|
||||
for _, ok := range tc.True {
|
||||
assert.True(t, a.RegexMatch(ok))
|
||||
}
|
||||
for _, ko := range tc.False {
|
||||
assert.False(t, a.RegexMatch(ko))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -16,6 +16,7 @@ limitations under the License.
|
||||
|
||||
package domain
|
||||
|
||||
type SearchIn interface {
|
||||
IsStringInList(value string) bool
|
||||
type AllowedList interface {
|
||||
ExactMatch(value string) bool
|
||||
RegexMatch(value string) bool
|
||||
}
|
||||
85
api/v1alpha1/domain/registry.go
Normal file
@@ -0,0 +1,85 @@
|
||||
/*
|
||||
Copyright 2020 Clastix Labs.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package domain
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
)
|
||||
|
||||
const defaultRegistryName = "docker.io"
|
||||
|
||||
type registry map[string]string
|
||||
|
||||
func (r registry) Registry() string {
|
||||
res, ok := r["registry"]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
if len(res) == 0 {
|
||||
return defaultRegistryName
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (r registry) Repository() string {
|
||||
res, ok := r["repository"]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
if res == defaultRegistryName {
|
||||
return ""
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (r registry) Image() string {
|
||||
res, ok := r["image"]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (r registry) Tag() string {
|
||||
res, ok := r["tag"]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
if len(res) == 0 {
|
||||
res = "latest"
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func NewRegistry(value string) Registry {
|
||||
reg := make(registry)
|
||||
r := regexp.MustCompile(`(((?P<registry>[a-zA-Z0-9-.]+)\/)?((?P<repository>[a-zA-Z0-9-.]+)\/))?(?P<image>[a-zA-Z0-9-.]+)(:(?P<tag>[a-zA-Z0-9-.]+))?`)
|
||||
match := r.FindStringSubmatch(value)
|
||||
for i, name := range r.SubexpNames() {
|
||||
if i > 0 && i <= len(match) {
|
||||
reg[name] = match[i]
|
||||
}
|
||||
}
|
||||
return reg
|
||||
}
|
||||
|
||||
type Registry interface {
|
||||
Registry() string
|
||||
Repository() string
|
||||
Image() string
|
||||
Tag() string
|
||||
}
|
||||
78
api/v1alpha1/domain/registry_test.go
Normal file
@@ -0,0 +1,78 @@
|
||||
/*
|
||||
Copyright 2020 Clastix Labs.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package domain
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewRegistry(t *testing.T) {
|
||||
type tc struct {
|
||||
registry string
|
||||
repo string
|
||||
image string
|
||||
tag string
|
||||
}
|
||||
for name, tc := range map[string]tc{
|
||||
"docker.io/my-org/my-repo:v0.0.1": {
|
||||
registry: "docker.io",
|
||||
repo: "my-org",
|
||||
image: "my-repo",
|
||||
tag: "v0.0.1",
|
||||
},
|
||||
"unnamed/repository:1.2.3": {
|
||||
registry: "docker.io",
|
||||
repo: "unnamed",
|
||||
image: "repository",
|
||||
tag: "1.2.3",
|
||||
},
|
||||
"quay.io/clastix/capsule:v1.0.0": {
|
||||
registry: "quay.io",
|
||||
repo: "clastix",
|
||||
image: "capsule",
|
||||
tag: "v1.0.0",
|
||||
},
|
||||
"docker.io/redis:alpine": {
|
||||
registry: "docker.io",
|
||||
repo: "",
|
||||
image: "redis",
|
||||
tag: "alpine",
|
||||
},
|
||||
"nginx:alpine": {
|
||||
registry: "docker.io",
|
||||
repo: "",
|
||||
image: "nginx",
|
||||
tag: "alpine",
|
||||
},
|
||||
"nginx": {
|
||||
registry: "docker.io",
|
||||
repo: "",
|
||||
image: "nginx",
|
||||
tag: "latest",
|
||||
},
|
||||
} {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
r := NewRegistry(name)
|
||||
assert.Equal(t, tc.registry, r.Registry())
|
||||
assert.Equal(t, tc.repo, r.Repository())
|
||||
assert.Equal(t, tc.image, r.Image())
|
||||
assert.Equal(t, tc.tag, r.Tag())
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -18,26 +18,25 @@ package v1alpha1
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type StorageClassList []string
|
||||
type IngressHostnamesList []string
|
||||
|
||||
func (n StorageClassList) Len() int {
|
||||
return len(n)
|
||||
func (hostnames IngressHostnamesList) Len() int {
|
||||
return len(hostnames)
|
||||
}
|
||||
|
||||
func (n StorageClassList) Swap(i, j int) {
|
||||
n[i], n[j] = n[j], n[i]
|
||||
func (hostnames IngressHostnamesList) Swap(i, j int) {
|
||||
hostnames[i], hostnames[j] = hostnames[j], hostnames[i]
|
||||
}
|
||||
|
||||
func (n StorageClassList) Less(i, j int) bool {
|
||||
return strings.ToLower(n[i]) < strings.ToLower(n[j])
|
||||
func (hostnames IngressHostnamesList) Less(i, j int) bool {
|
||||
return hostnames[i] < hostnames[j]
|
||||
}
|
||||
|
||||
func (n StorageClassList) IsStringInList(value string) (ok bool) {
|
||||
sort.Sort(n)
|
||||
i := sort.SearchStrings(n, value)
|
||||
ok = i < n.Len() && n[i] == value
|
||||
func (hostnames IngressHostnamesList) IsStringInList(value string) (ok bool) {
|
||||
sort.Sort(hostnames)
|
||||
i := sort.SearchStrings(hostnames, value)
|
||||
ok = i < hostnames.Len() && hostnames[i] == value
|
||||
return
|
||||
}
|
||||
@@ -17,7 +17,7 @@ limitations under the License.
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -25,8 +25,14 @@ const (
|
||||
AvailableIngressClassesRegexpAnnotation = "capsule.clastix.io/ingress-classes-regexp"
|
||||
AvailableStorageClassesAnnotation = "capsule.clastix.io/storage-classes"
|
||||
AvailableStorageClassesRegexpAnnotation = "capsule.clastix.io/storage-classes-regexp"
|
||||
AllowedRegistriesAnnotation = "capsule.clastix.io/allowed-registries"
|
||||
AllowedRegistriesRegexpAnnotation = "capsule.clastix.io/allowed-registries-regexp"
|
||||
)
|
||||
|
||||
func UsedQuotaFor(resource corev1.ResourceName) string {
|
||||
func UsedQuotaFor(resource fmt.Stringer) string {
|
||||
return "quota.capsule.clastix.io/used-" + resource.String()
|
||||
}
|
||||
|
||||
func HardQuotaFor(resource fmt.Stringer) string {
|
||||
return "quota.capsule.clastix.io/hard-" + resource.String()
|
||||
}
|
||||
|
||||
@@ -23,7 +23,11 @@ import (
|
||||
)
|
||||
|
||||
func (t *Tenant) IsFull() bool {
|
||||
return t.Status.Namespaces.Len() >= int(t.Spec.NamespaceQuota)
|
||||
// we don't have limits on assigned Namespaces
|
||||
if t.Spec.NamespaceQuota == nil {
|
||||
return false
|
||||
}
|
||||
return len(t.Status.Namespaces) >= int(*t.Spec.NamespaceQuota)
|
||||
}
|
||||
|
||||
func (t *Tenant) AssignNamespaces(namespaces []corev1.Namespace) {
|
||||
|
||||
@@ -21,6 +21,7 @@ import (
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
@@ -34,6 +35,8 @@ func GetTypeLabel(t runtime.Object) (label string, err error) {
|
||||
return "capsule.clastix.io/network-policy", nil
|
||||
case *corev1.ResourceQuota:
|
||||
return "capsule.clastix.io/resource-quota", nil
|
||||
case *rbacv1.RoleBinding:
|
||||
return "capsule.clastix.io/role-binding", nil
|
||||
default:
|
||||
err = fmt.Errorf("type %T is not mapped as Capsule label recognized", v)
|
||||
}
|
||||
|
||||
@@ -19,49 +19,51 @@ package v1alpha1
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// +kubebuilder:validation:Minimum=1
|
||||
type NamespaceQuota uint
|
||||
|
||||
type AdditionalMetadata struct {
|
||||
// +nullable
|
||||
AdditionalLabels map[string]string `json:"additionalLabels"`
|
||||
// +nullable
|
||||
AdditionalAnnotations map[string]string `json:"additionalAnnotations"`
|
||||
AdditionalLabels map[string]string `json:"additionalLabels,omitempty"`
|
||||
AdditionalAnnotations map[string]string `json:"additionalAnnotations,omitempty"`
|
||||
}
|
||||
|
||||
type StorageClassesSpec struct {
|
||||
// +nullable
|
||||
Allowed StorageClassList `json:"allowed"`
|
||||
// +nullable
|
||||
AllowedRegex string `json:"allowedRegex"`
|
||||
type IngressHostnamesSpec struct {
|
||||
Allowed IngressHostnamesList `json:"allowed"`
|
||||
AllowedRegex string `json:"allowedRegex"`
|
||||
}
|
||||
|
||||
type IngressClassesSpec struct {
|
||||
// +nullable
|
||||
Allowed IngressClassList `json:"allowed"`
|
||||
// +nullable
|
||||
AllowedRegex string `json:"allowedRegex"`
|
||||
// +kubebuilder:validation:Pattern="^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$"
|
||||
type AllowedIP string
|
||||
|
||||
type ExternalServiceIPs struct {
|
||||
Allowed []AllowedIP `json:"allowed"`
|
||||
}
|
||||
|
||||
// TenantSpec defines the desired state of Tenant
|
||||
type TenantSpec struct {
|
||||
Owner OwnerSpec `json:"owner"`
|
||||
// +kubebuilder:validation:Optional
|
||||
NamespacesMetadata AdditionalMetadata `json:"namespacesMetadata"`
|
||||
// +kubebuilder:validation:Optional
|
||||
ServicesMetadata AdditionalMetadata `json:"servicesMetadata"`
|
||||
StorageClasses StorageClassesSpec `json:"storageClasses"`
|
||||
IngressClasses IngressClassesSpec `json:"ingressClasses"`
|
||||
// +kubebuilder:validation:Optional
|
||||
NodeSelector map[string]string `json:"nodeSelector"`
|
||||
NamespaceQuota NamespaceQuota `json:"namespaceQuota"`
|
||||
NetworkPolicies []networkingv1.NetworkPolicySpec `json:"networkPolicies,omitempty"`
|
||||
LimitRanges []corev1.LimitRangeSpec `json:"limitRanges"`
|
||||
// +kubebuilder:validation:Optional
|
||||
ResourceQuota []corev1.ResourceQuotaSpec `json:"resourceQuotas"`
|
||||
|
||||
//+kubebuilder:validation:Minimum=1
|
||||
NamespaceQuota *int32 `json:"namespaceQuota,omitempty"`
|
||||
NamespacesMetadata AdditionalMetadata `json:"namespacesMetadata,omitempty"`
|
||||
ServicesMetadata AdditionalMetadata `json:"servicesMetadata,omitempty"`
|
||||
StorageClasses *AllowedListSpec `json:"storageClasses,omitempty"`
|
||||
IngressClasses *AllowedListSpec `json:"ingressClasses,omitempty"`
|
||||
IngressHostnames *AllowedListSpec `json:"ingressHostnames,omitempty"`
|
||||
ContainerRegistries *AllowedListSpec `json:"containerRegistries,omitempty"`
|
||||
NodeSelector map[string]string `json:"nodeSelector,omitempty"`
|
||||
NetworkPolicies []networkingv1.NetworkPolicySpec `json:"networkPolicies,omitempty"`
|
||||
LimitRanges []corev1.LimitRangeSpec `json:"limitRanges,omitempty"`
|
||||
ResourceQuota []corev1.ResourceQuotaSpec `json:"resourceQuotas,omitempty"`
|
||||
AdditionalRoleBindings []AdditionalRoleBindings `json:"additionalRoleBindings,omitempty"`
|
||||
ExternalServiceIPs *ExternalServiceIPs `json:"externalServiceIPs,omitempty"`
|
||||
}
|
||||
|
||||
type AdditionalRoleBindings struct {
|
||||
ClusterRoleName string `json:"clusterRoleName"`
|
||||
// kubebuilder:validation:Minimum=1
|
||||
Subjects []rbacv1.Subject `json:"subjects"`
|
||||
}
|
||||
|
||||
// OwnerSpec defines tenant owner name and kind
|
||||
@@ -79,10 +81,8 @@ func (k Kind) String() string {
|
||||
|
||||
// TenantStatus defines the observed state of Tenant
|
||||
type TenantStatus struct {
|
||||
Size uint `json:"size"`
|
||||
Namespaces NamespaceList `json:"namespaces,omitempty"`
|
||||
Users []string `json:"users,omitempty"`
|
||||
Groups []string `json:"groups,omitempty"`
|
||||
Size uint `json:"size"`
|
||||
Namespaces []string `json:"namespaces,omitempty"`
|
||||
}
|
||||
|
||||
// +kubebuilder:object:root=true
|
||||
@@ -92,6 +92,7 @@ type TenantStatus struct {
|
||||
// +kubebuilder:printcolumn:name="Namespace count",type="integer",JSONPath=".status.size",description="The total amount of Namespaces in use"
|
||||
// +kubebuilder:printcolumn:name="Owner name",type="string",JSONPath=".spec.owner.name",description="The assigned Tenant owner"
|
||||
// +kubebuilder:printcolumn:name="Owner kind",type="string",JSONPath=".spec.owner.kind",description="The assigned Tenant owner kind"
|
||||
// +kubebuilder:printcolumn:name="Node selector",type="string",JSONPath=".spec.nodeSelector",description="Node Selector applied to Pods"
|
||||
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age"
|
||||
|
||||
// Tenant is the Schema for the tenants API
|
||||
|
||||
@@ -23,6 +23,7 @@ package v1alpha1
|
||||
import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/api/networking/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
@@ -56,63 +57,104 @@ func (in *AdditionalMetadata) DeepCopy() *AdditionalMetadata {
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in IngressClassList) DeepCopyInto(out *IngressClassList) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(IngressClassList, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassList.
|
||||
func (in IngressClassList) DeepCopy() IngressClassList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IngressClassList)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IngressClassesSpec) DeepCopyInto(out *IngressClassesSpec) {
|
||||
func (in *AdditionalRoleBindings) DeepCopyInto(out *AdditionalRoleBindings) {
|
||||
*out = *in
|
||||
if in.Allowed != nil {
|
||||
in, out := &in.Allowed, &out.Allowed
|
||||
*out = make(IngressClassList, len(*in))
|
||||
if in.Subjects != nil {
|
||||
in, out := &in.Subjects, &out.Subjects
|
||||
*out = make([]rbacv1.Subject, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassesSpec.
|
||||
func (in *IngressClassesSpec) DeepCopy() *IngressClassesSpec {
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalRoleBindings.
|
||||
func (in *AdditionalRoleBindings) DeepCopy() *AdditionalRoleBindings {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IngressClassesSpec)
|
||||
out := new(AdditionalRoleBindings)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in NamespaceList) DeepCopyInto(out *NamespaceList) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(NamespaceList, len(*in))
|
||||
func (in *AllowedListSpec) DeepCopyInto(out *AllowedListSpec) {
|
||||
*out = *in
|
||||
if in.Exact != nil {
|
||||
in, out := &in.Exact, &out.Exact
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList.
|
||||
func (in NamespaceList) DeepCopy() NamespaceList {
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedListSpec.
|
||||
func (in *AllowedListSpec) DeepCopy() *AllowedListSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NamespaceList)
|
||||
out := new(AllowedListSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ExternalServiceIPs) DeepCopyInto(out *ExternalServiceIPs) {
|
||||
*out = *in
|
||||
if in.Allowed != nil {
|
||||
in, out := &in.Allowed, &out.Allowed
|
||||
*out = make([]AllowedIP, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalServiceIPs.
|
||||
func (in *ExternalServiceIPs) DeepCopy() *ExternalServiceIPs {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ExternalServiceIPs)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in IngressHostnamesList) DeepCopyInto(out *IngressHostnamesList) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(IngressHostnamesList, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressHostnamesList.
|
||||
func (in IngressHostnamesList) DeepCopy() IngressHostnamesList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IngressHostnamesList)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *IngressHostnamesSpec) DeepCopyInto(out *IngressHostnamesSpec) {
|
||||
*out = *in
|
||||
if in.Allowed != nil {
|
||||
in, out := &in.Allowed, &out.Allowed
|
||||
*out = make(IngressHostnamesList, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressHostnamesSpec.
|
||||
func (in *IngressHostnamesSpec) DeepCopy() *IngressHostnamesSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(IngressHostnamesSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *OwnerSpec) DeepCopyInto(out *OwnerSpec) {
|
||||
*out = *in
|
||||
@@ -128,45 +170,6 @@ func (in *OwnerSpec) DeepCopy() *OwnerSpec {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in StorageClassList) DeepCopyInto(out *StorageClassList) {
|
||||
{
|
||||
in := &in
|
||||
*out = make(StorageClassList, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassList.
|
||||
func (in StorageClassList) DeepCopy() StorageClassList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StorageClassList)
|
||||
in.DeepCopyInto(out)
|
||||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StorageClassesSpec) DeepCopyInto(out *StorageClassesSpec) {
|
||||
*out = *in
|
||||
if in.Allowed != nil {
|
||||
in, out := &in.Allowed, &out.Allowed
|
||||
*out = make(StorageClassList, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassesSpec.
|
||||
func (in *StorageClassesSpec) DeepCopy() *StorageClassesSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StorageClassesSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Tenant) DeepCopyInto(out *Tenant) {
|
||||
*out = *in
|
||||
@@ -230,10 +233,33 @@ func (in *TenantList) DeepCopyObject() runtime.Object {
|
||||
func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
|
||||
*out = *in
|
||||
out.Owner = in.Owner
|
||||
if in.NamespaceQuota != nil {
|
||||
in, out := &in.NamespaceQuota, &out.NamespaceQuota
|
||||
*out = new(int32)
|
||||
**out = **in
|
||||
}
|
||||
in.NamespacesMetadata.DeepCopyInto(&out.NamespacesMetadata)
|
||||
in.ServicesMetadata.DeepCopyInto(&out.ServicesMetadata)
|
||||
in.StorageClasses.DeepCopyInto(&out.StorageClasses)
|
||||
in.IngressClasses.DeepCopyInto(&out.IngressClasses)
|
||||
if in.StorageClasses != nil {
|
||||
in, out := &in.StorageClasses, &out.StorageClasses
|
||||
*out = new(AllowedListSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.IngressClasses != nil {
|
||||
in, out := &in.IngressClasses, &out.IngressClasses
|
||||
*out = new(AllowedListSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.IngressHostnames != nil {
|
||||
in, out := &in.IngressHostnames, &out.IngressHostnames
|
||||
*out = new(AllowedListSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ContainerRegistries != nil {
|
||||
in, out := &in.ContainerRegistries, &out.ContainerRegistries
|
||||
*out = new(AllowedListSpec)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.NodeSelector != nil {
|
||||
in, out := &in.NodeSelector, &out.NodeSelector
|
||||
*out = make(map[string]string, len(*in))
|
||||
@@ -262,6 +288,18 @@ func (in *TenantSpec) DeepCopyInto(out *TenantSpec) {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.AdditionalRoleBindings != nil {
|
||||
in, out := &in.AdditionalRoleBindings, &out.AdditionalRoleBindings
|
||||
*out = make([]AdditionalRoleBindings, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.ExternalServiceIPs != nil {
|
||||
in, out := &in.ExternalServiceIPs, &out.ExternalServiceIPs
|
||||
*out = new(ExternalServiceIPs)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TenantSpec.
|
||||
@@ -279,16 +317,6 @@ func (in *TenantStatus) DeepCopyInto(out *TenantStatus) {
|
||||
*out = *in
|
||||
if in.Namespaces != nil {
|
||||
in, out := &in.Namespaces, &out.Namespaces
|
||||
*out = make(NamespaceList, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Users != nil {
|
||||
in, out := &in.Users, &out.Users
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Groups != nil {
|
||||
in, out := &in.Groups, &out.Groups
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
|
||||
3
assets/capsule-operator.svg
Normal file
|
After Width: | Height: | Size: 29 KiB |
@@ -1 +0,0 @@
|
||||
Icons made by [Roundicons](https://www.flaticon.com/authors/roundicons) from [www.flaticon.com](https://www.flaticon.com).
|
||||
BIN
assets/logo/capsule.png
Normal file
|
After Width: | Height: | Size: 91 KiB |
101
assets/logo/capsule.svg
Normal file
@@ -0,0 +1,101 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<!-- Generator: Adobe Illustrator 24.2.1, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
|
||||
<svg version="1.1" id="Livello_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||
viewBox="0 0 595.28 841.89" style="enable-background:new 0 0 595.28 841.89;" xml:space="preserve">
|
||||
<style type="text/css">
|
||||
.st0{fill:#274872;}
|
||||
.st1{fill:#314A70;}
|
||||
.st2{fill:#5783AB;}
|
||||
.st3{fill:#EAECEC;}
|
||||
</style>
|
||||
<path class="st0" d="M243.53,178.65c-0.06-4.5-0.37-9.02,0-13.49c0.1-1.22,2.13-3.09,3.45-3.25c6.99-0.88,14.03-1.47,21.07-1.8
|
||||
c2.43-0.12,3.48-1.05,4.29-3.12c2-5.14,4.08-10.25,6.32-15.29c0.86-1.93,0.56-2.83-1.2-4.09c-4.42-3.15-4.97-8.41-1.6-12.08
|
||||
c3.7-4.04,8.88-4.09,12.65-0.12c3.5,3.68,3.07,8.88-1.39,12.08c-1.93,1.39-2.08,2.44-1.22,4.44c2.19,5.06,3.96,10.31,6.33,15.27
|
||||
c0.65,1.37,2.73,2.73,4.28,2.89c7.57,0.77,15.19,1.17,22.79,1.64c2.69,0.16,4.13,1.28,4.21,4.15c0.1,3.95,0.43,7.89,0.66,11.84
|
||||
c-1.51,0.05-3.03,0.22-4.53,0.13c-12.54-0.76-37.47-2.65-37.47-2.65S254.81,177.52,243.53,178.65z"/>
|
||||
<g>
|
||||
<path class="st1" d="M73.32,483.91c-5.2-2.69-9.26-6.43-12.18-11.22c-2.92-4.78-4.38-10.21-4.38-16.28c0-6.07,1.46-11.5,4.38-16.28
|
||||
c2.92-4.78,6.98-8.52,12.18-11.22c5.2-2.69,11.06-4.04,17.59-4.04c6.45,0,12.09,1.35,16.91,4.04c4.82,2.7,8.33,6.55,10.53,11.56
|
||||
l-13.78,7.4c-3.19-5.62-7.78-8.43-13.78-8.43c-4.63,0-8.47,1.52-11.5,4.55c-3.04,3.04-4.55,7.17-4.55,12.41
|
||||
c0,5.24,1.52,9.38,4.55,12.41c3.04,3.04,6.87,4.55,11.5,4.55c6.07,0,10.66-2.81,13.78-8.43l13.78,7.52
|
||||
c-2.2,4.86-5.71,8.65-10.53,11.39c-4.82,2.73-10.46,4.1-16.91,4.1C84.38,487.95,78.52,486.6,73.32,483.91z"/>
|
||||
<path class="st1" d="M175.17,431.64c5.08,4.52,7.63,11.33,7.63,20.44v34.96h-16.62v-7.63c-3.34,5.69-9.56,8.54-18.67,8.54
|
||||
c-4.71,0-8.79-0.8-12.24-2.39c-3.46-1.59-6.09-3.79-7.91-6.6c-1.82-2.81-2.73-6-2.73-9.56c0-5.69,2.14-10.17,6.43-13.44
|
||||
c4.29-3.26,10.91-4.9,19.87-4.9h14.12c0-3.87-1.18-6.85-3.53-8.94c-2.35-2.09-5.88-3.13-10.59-3.13c-3.26,0-6.47,0.51-9.62,1.54
|
||||
c-3.15,1.03-5.83,2.41-8.03,4.16l-6.38-12.41c3.34-2.35,7.34-4.17,12.01-5.47c4.67-1.29,9.47-1.94,14.4-1.94
|
||||
C162.8,424.87,170.08,427.13,175.17,431.64z M160.03,473.89c2.35-1.4,4.02-3.47,5.01-6.21v-6.26h-12.18
|
||||
c-7.29,0-10.93,2.39-10.93,7.17c0,2.28,0.89,4.08,2.68,5.41c1.78,1.33,4.23,1.99,7.34,1.99
|
||||
C154.98,475.99,157.67,475.29,160.03,473.89z"/>
|
||||
<path class="st1" d="M250.6,428.8c4.67,2.62,8.33,6.3,10.99,11.04c2.66,4.75,3.99,10.27,3.99,16.57s-1.33,11.82-3.99,16.57
|
||||
c-2.66,4.75-6.32,8.43-10.99,11.04s-9.85,3.93-15.54,3.93c-7.82,0-13.97-2.47-18.45-7.4v28.58h-17.76v-83.35h16.97v7.06
|
||||
c4.4-5.31,10.82-7.97,19.24-7.97C240.76,424.87,245.94,426.18,250.6,428.8z M243.2,468.76c2.92-3.07,4.38-7.19,4.38-12.35
|
||||
s-1.46-9.28-4.38-12.35c-2.92-3.07-6.66-4.61-11.22-4.61s-8.29,1.54-11.22,4.61c-2.92,3.07-4.38,7.19-4.38,12.35
|
||||
s1.46,9.28,4.38,12.35c2.92,3.07,6.66,4.61,11.22,4.61S240.28,471.84,243.2,468.76z"/>
|
||||
<path class="st1" d="M283.11,486.07c-4.86-1.25-8.73-2.83-11.61-4.73l5.92-12.75c2.73,1.75,6.03,3.17,9.91,4.27
|
||||
c3.87,1.1,7.67,1.65,11.39,1.65c7.51,0,11.27-1.86,11.27-5.58c0-1.75-1.03-3-3.07-3.76c-2.05-0.76-5.2-1.4-9.45-1.94
|
||||
c-5.01-0.76-9.15-1.63-12.41-2.62c-3.26-0.99-6.09-2.73-8.48-5.24s-3.59-6.07-3.59-10.7c0-3.87,1.12-7.3,3.36-10.3
|
||||
c2.24-3,5.5-5.33,9.79-7c4.29-1.67,9.35-2.5,15.2-2.5c4.33,0,8.63,0.48,12.92,1.42c4.29,0.95,7.84,2.26,10.65,3.93l-5.92,12.64
|
||||
c-5.39-3.04-11.27-4.55-17.65-4.55c-3.8,0-6.64,0.53-8.54,1.59c-1.9,1.06-2.85,2.43-2.85,4.1c0,1.9,1.02,3.23,3.07,3.99
|
||||
c2.05,0.76,5.31,1.48,9.79,2.16c5.01,0.84,9.11,1.73,12.3,2.68c3.19,0.95,5.96,2.68,8.31,5.18c2.35,2.5,3.53,6,3.53,10.48
|
||||
c0,3.8-1.14,7.17-3.42,10.13c-2.28,2.96-5.6,5.26-9.96,6.89c-4.37,1.63-9.55,2.45-15.54,2.45
|
||||
C292.94,487.95,287.97,487.32,283.11,486.07z"/>
|
||||
<path class="st1" d="M399.59,425.78v61.26h-16.85v-7.29c-2.35,2.66-5.16,4.69-8.43,6.09c-3.26,1.4-6.79,2.11-10.59,2.11
|
||||
c-8.05,0-14.42-2.31-19.13-6.95c-4.71-4.63-7.06-11.5-7.06-20.61v-34.61h17.76v32c0,9.87,4.14,14.8,12.41,14.8
|
||||
c4.25,0,7.67-1.38,10.25-4.16c2.58-2.77,3.87-6.89,3.87-12.35v-30.29H399.59z"/>
|
||||
<path class="st1" d="M416.1,402.55h17.76v84.49H416.1V402.55z"/>
|
||||
<path class="st1" d="M510.04,461.42H463.7c0.83,3.8,2.81,6.79,5.92,9c3.11,2.2,6.98,3.3,11.61,3.3c3.19,0,6.01-0.47,8.48-1.42
|
||||
c2.47-0.95,4.76-2.45,6.89-4.5l9.45,10.25c-5.77,6.6-14.2,9.91-25.28,9.91c-6.91,0-13.02-1.35-18.33-4.04
|
||||
c-5.31-2.69-9.41-6.43-12.3-11.22c-2.89-4.78-4.33-10.21-4.33-16.28c0-6,1.42-11.4,4.27-16.23c2.85-4.82,6.76-8.58,11.73-11.27
|
||||
c4.97-2.69,10.53-4.04,16.68-4.04c6,0,11.42,1.29,16.28,3.87c4.86,2.58,8.67,6.28,11.44,11.1c2.77,4.82,4.16,10.42,4.16,16.79
|
||||
C510.38,456.86,510.27,458.46,510.04,461.42z M468.48,441.72c-2.73,2.28-4.4,5.39-5.01,9.34h30.17c-0.61-3.87-2.28-6.96-5.01-9.28
|
||||
c-2.73-2.31-6.07-3.47-10.02-3.47C474.59,438.3,471.21,439.44,468.48,441.72z"/>
|
||||
</g>
|
||||
<g>
|
||||
<g>
|
||||
<path class="st2" d="M144.97,316.25c2.88-4.14,5.7-8.31,8.68-12.38c0.84-1.14,2.13-1.94,3.22-2.9c8.67,2.77,17.24,5.98,26.06,8.18
|
||||
c7.28,1.81,7.49,1.33,11.08-5.55c9.52-18.28,18.99-36.58,28.42-54.91c3.55-6.9,7.04-13.85,10.34-20.87c1.87-3.99,1-5.28-3.27-5.1
|
||||
c-5.07,0.21-10.13,0.68-15.19,1.04c1.72-2.35,3.24-4.87,5.2-7.01c4.47-4.88,9.14-9.57,13.74-14.34c1.84-0.03,3.68,0.02,5.52-0.1
|
||||
c14.62-1.03,29.24-2.1,43.86-3.16c-0.08,0.84-0.24,1.68-0.24,2.52c0.01,48.41,0.03,96.83,0.05,145.24
|
||||
c-15.73,0.85-30.48,0.97-47.48-0.65c-16.01-1.04-30.66-3.54-46.6-5.49c-13.64-1.67-26.85-5.2-39.21-11.4
|
||||
c-4.77-2.4-5.86-5.41-4.24-10.45C145.16,318.1,144.96,317.14,144.97,316.25z"/>
|
||||
<path class="st3" d="M282.42,346.9c-0.02-48.41-0.04-96.83-0.05-145.24c0-0.84,0.05-1.64,0.04-2.48
|
||||
c5.63,0.1,11.47-0.06,17.08,0.32c11.35,0.78,22.67,1.83,34.01,2.77c2.69,3.09,5.47,6.1,8.05,9.28c3.38,4.17,6.61,8.47,9.9,12.71
|
||||
c-6.04-0.52-12.07-1.2-18.13-1.49c-4.12-0.2-4.91,1.24-3.08,4.81c9.87,19.27,19.73,38.54,29.65,57.78
|
||||
c4.02,7.79,8.22,15.49,12.24,23.29c1.46,2.83,3.6,3.9,6.61,3.17c11.52-2.81,23.03-5.68,34.54-8.52c1.8,3.04,3.52,6.13,5.42,9.1
|
||||
c0.89,1.39,2.13,2.56,3.21,3.83c0,0.56-0.19,1.22,0.04,1.66c3.28,6.31-0.16,9.95-5.82,12.53c-14.18,6.44-29.11,9.85-44.52,11.41
|
||||
c-12.89,1.31-25.79,2.51-38.68,3.77c-6.24,0.61-12.47,1.45-18.72,1.79c-4.58,0.24-9.2-0.17-13.81-0.3
|
||||
c-5.95-0.04-11.9-0.08-17.85-0.12L282.42,346.9z"/>
|
||||
<path class="st2" d="M413.28,303.3c-11.51,2.84-23.02,5.71-34.54,8.52c-3.01,0.74-5.15-0.34-6.61-3.17
|
||||
c-4.02-7.79-8.22-15.49-12.24-23.29c-9.92-19.24-19.79-38.51-29.65-57.78c-1.83-3.57-1.04-5.01,3.08-4.81
|
||||
c6.05,0.29,12.09,0.97,18.13,1.49c1.89,0.4,2.54,0.15,5.06,3.74c17.1,24.41,37.01,47.73,54.85,71.62
|
||||
C412.17,300.72,412.64,302.07,413.28,303.3z"/>
|
||||
<path class="st3" d="M155.06,302.38c11.51,2.84,22.26,5.47,33.78,8.28c3.01,0.74,5.15-0.34,6.61-3.17
|
||||
c4.02-7.79,8.22-15.49,12.24-23.29c9.92-19.24,17.3-37.26,26.37-56.7c1.83-3.57,0.68-4.95-3.44-4.75
|
||||
c-6.05,0.29-10.08,0.42-16.13,0.94c-2.11,1.25-2.46,1.66-3.84,3.47c-18.01,23.75-35.83,47.64-53.67,71.53
|
||||
C156.18,299.79,155.7,301.14,155.06,302.38z"/>
|
||||
<path class="st0" d="M421.92,316.24c0,0.56-0.19,1.22,0.04,1.66c3.28,6.31-0.16,9.95-5.82,12.53
|
||||
c-14.18,6.44-29.11,9.85-44.52,11.41c-12.89,1.31-25.79,2.51-38.68,3.77c-6.24,0.61-12.94,1.22-18.94,1.29
|
||||
c-4.59,0.05-8.98,0.32-13.59,0.2c-5.95-0.04-11.9-0.08-17.85-0.12c0,0-0.12-0.08-0.12-0.08c-15.36,0.35-28.73,0.35-46.17-1.19
|
||||
c-15.98-1.41-31.97-2.99-47.91-4.95c-13.64-1.67-26.85-5.2-39.21-11.4c-4.77-2.4-5.86-5.41-4.24-10.45
|
||||
c0.26-0.81,0.06-1.77,0.07-2.66c-6.55,2.47-11.33,6.45-12.86,13.75c-1.74,8.28,0.69,15.31,5.77,21.67
|
||||
c1.43,1.79,2.4,3.22,0.07,5.22c-0.71,0.61-0.81,3.27-0.15,3.89c6.36,6.04,13.89,10.11,22.37,12.36c2.35,0.62,4.12,0.02,4.62-2.85
|
||||
c0.11-0.64,1.63-1.63,2.27-1.49c8.66,1.96,17.26,4.13,25.91,6.14c1.98,0.46,2.73,1,1.52,3.01c-1.45,2.4-0.41,3.92,2,4.93
|
||||
c8.64,3.63,17.82,3.98,26.97,4.34c2.18,0.08,4.54-0.9,3.51-3.88c-1.11-3.22,0.45-3.2,2.83-2.99c8.57,0.73,17.14,1.44,25.72,1.95
|
||||
c3.13,0.19,3.98,1.04,2.41,3.98c-1.6,2.98-0.26,4.76,2.9,4.77c14.82,0.08,29.65,0.17,44.46-0.08c4.59-0.08,5.1-1.29,3.36-5.63
|
||||
c-0.84-2.1-0.97-2.87,1.76-3.02c9.16-0.52,18.32-1.21,27.45-2.12c2.5-0.25,3.06,0.34,2.55,2.56c-0.53,2.31,0.05,4.05,2.72,4.11
|
||||
c9.52,0.21,18.91-0.53,27.82-4.34c1.95-0.83,3.09-2.06,1.71-4.23c-1.72-2.71-0.09-3.15,2.17-3.67c8.24-1.87,16.46-3.83,24.64-5.93
|
||||
c1.82-0.47,3-0.77,3.21,1.6c0.26,2.99,2.1,3.32,4.53,2.61c8.11-2.36,15.55-5.98,21.6-11.99c0.69-0.69,1.03-2.99,0.55-3.39
|
||||
c-3.18-2.71-1.41-4.64,0.51-6.95C437.87,340.92,439.33,322.67,421.92,316.24z"/>
|
||||
</g>
|
||||
</g>
|
||||
<path class="st3" d="M324.35,192.94c-6.72-0.27-13.4-0.35-20.23-0.52c-7.13-0.17-18.9-0.51-18.9-0.51s-1.27,0.04-2.44,0
|
||||
c0,0-0.63-0.01-0.63,0.18c-0.01-5.67,0.01-11.83,0-17.5c12.58,0.95,24.65,1.94,37.19,2.72c1.5,0.09,3.29-0.07,4.8-0.12
|
||||
C324.19,182.43,324.33,187.69,324.35,192.94z"/>
|
||||
<path class="st2" d="M243.35,193.45c6.72-0.27,10.02-0.35,16.86-0.52c7.13-0.17,18.9-0.51,18.9-0.51s1.27,0.04,2.44,0
|
||||
c0,0,0.63-0.53,0.63-0.34c0.01-5.67-0.01-11.83,0-17.5c-12.58,0.95-21.28,1.94-33.82,2.72c-1.5,0.09-3.29-0.07-4.8-0.12
|
||||
C243.51,182.43,243.38,188.21,243.35,193.45z"/>
|
||||
<path class="st0" d="M327.57,193.15c-1.31-0.1-2.62-0.17-3.93-0.26c-13.33-0.32-26.66-0.63-39.99-0.95v0c-0.03,0-0.06,0-0.1,0
|
||||
c-0.03,0-0.06,0-0.1,0v0c-13.33,0.32-26.66,0.63-39.99,0.95c-1.31,0.08-2.62,0.15-3.93,0.26c-6.26,0.5-6.88,1.16-6.73,7.17
|
||||
c0.02,0.7,0.18,1.39,0.27,2.09c1.91-0.03,3.82,0.02,5.72-0.1c14.92-1.02,28.65-2.07,43.57-3.11c14.92,1.04,31.01,2.1,45.93,3.11
|
||||
c1.9,0.13,3.81,0.07,5.72,0.1c0.09-0.7,0.25-1.39,0.27-2.09C334.45,194.31,333.82,193.65,327.57,193.15z"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 9.4 KiB |
BIN
assets/logo/capsule_medium.png
Normal file
|
After Width: | Height: | Size: 80 KiB |
BIN
assets/logo/capsule_raw.png
Normal file
|
After Width: | Height: | Size: 88 KiB |
BIN
assets/logo/capsule_small.png
Normal file
|
After Width: | Height: | Size: 32 KiB |
|
Before Width: | Height: | Size: 18 KiB |
@@ -1,107 +0,0 @@
|
||||
<?xml version="1.0" encoding="iso-8859-1"?>
|
||||
<!-- Generator: Adobe Illustrator 19.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
|
||||
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" x="0px" y="0px"
|
||||
viewBox="0 0 504.123 504.123" style="enable-background:new 0 0 504.123 504.123;" xml:space="preserve">
|
||||
<path style="fill:#2477AA;" d="M265.665,468.582c0.378-1.276,0.646-2.615,0.646-4.033v-63.827c0-7.483-5.805-13.564-13.162-14.131
|
||||
l0,0c-0.354-0.039-0.709-0.118-1.087-0.11c-7.869,0-14.249,6.372-14.249,14.241v63.835c0,1.41,0.268,2.749,0.646,4.025
|
||||
c-4.112,1.772-7.026,5.868-7.026,10.65v3.812v3.419c0,6.396,5.175,11.587,11.579,11.587h18.093c6.396,0,11.571-5.183,11.571-11.587
|
||||
v-7.231C272.675,474.451,269.777,470.355,265.665,468.582z"/>
|
||||
<ellipse style="fill:#7BC6C6;" cx="252.062" cy="85.851" rx="7.404" ry="67.245"/>
|
||||
<circle style="fill:#FF7F00;" cx="252.062" cy="269.722" r="183.863"/>
|
||||
<path style="fill:#FF5B00;" d="M252.062,85.858c101.541,0,183.863,82.322,183.863,183.863c0,101.557-82.322,183.879-183.863,183.879
|
||||
"/>
|
||||
<path style="fill:#25618E;" d="M110.222,386.733h283.672c25.647-31.051,41.173-70.719,41.874-113.971H68.348
|
||||
C69.065,316.014,84.582,355.675,110.222,386.733z"/>
|
||||
<g>
|
||||
<path style="fill:#18456D;" d="M252.062,272.762v113.971h141.832c0-0.008,0.024-0.024,0.031-0.039
|
||||
c0.055-0.063,0.095-0.142,0.165-0.197c3.411-4.151,6.609-8.476,9.657-12.926c1.166-1.686,2.198-3.45,3.277-5.167
|
||||
c1.827-2.851,3.631-5.742,5.309-8.704c1.26-2.229,2.41-4.537,3.584-6.829c1.276-2.505,2.489-5.049,3.671-7.625
|
||||
c1.197-2.67,2.355-5.38,3.426-8.113c0.874-2.221,1.678-4.474,2.465-6.735c1.087-3.119,2.15-6.246,3.072-9.429
|
||||
c0.512-1.757,0.922-3.545,1.378-5.325c3.497-13.674,5.585-27.932,5.837-42.646c0-0.079,0-0.15,0.016-0.221H252.062V272.762z"/>
|
||||
<path style="fill:#18456D;" d="M152.174,298.977c0,1.883-1.528,3.426-3.419,3.426h-28.499c-1.883,0-3.419-1.536-3.419-3.426l0,0
|
||||
c0-1.883,1.528-3.426,3.419-3.426h28.499C150.646,295.55,152.174,297.094,152.174,298.977L152.174,298.977z"/>
|
||||
<path style="fill:#18456D;" d="M152.174,318.354c0,1.883-1.528,3.419-3.419,3.419h-28.499c-1.883,0-3.419-1.528-3.419-3.419l0,0
|
||||
c0-1.89,1.528-3.426,3.419-3.426h28.499C150.646,314.927,152.174,316.463,152.174,318.354L152.174,318.354z"/>
|
||||
<path style="fill:#18456D;" d="M152.174,337.723c0,1.89-1.528,3.426-3.419,3.426h-28.499c-1.883,0-3.419-1.528-3.419-3.426l0,0
|
||||
c0-1.883,1.528-3.419,3.419-3.419h28.499C150.646,334.305,152.174,335.841,152.174,337.723L152.174,337.723z"/>
|
||||
</g>
|
||||
<path style="fill:#25618E;" d="M380.109,352.54c0,10.075-8.153,18.235-18.219,18.235h-67.253c-10.059,0-18.219-8.16-18.219-18.235
|
||||
v-45.584c0-10.067,8.16-18.227,18.219-18.227h67.253c10.067,0,18.219,8.16,18.219,18.227V352.54z"/>
|
||||
<g>
|
||||
<path style="fill:#3479A3;" d="M367.577,347.034c0,7.633-6.183,13.832-13.824,13.832h-50.987c-7.641,0-13.832-6.199-13.832-13.832
|
||||
v-34.572c0-7.633,6.191-13.824,13.832-13.824h50.987c7.641,0,13.824,6.191,13.824,13.824V347.034z"/>
|
||||
<path style="fill:#3479A3;" d="M289.666,81.865c0,7.239-5.868,13.107-13.107,13.107h-49.01c-7.231,0-13.099-5.868-13.099-13.107
|
||||
l0,0c0-7.239,5.868-13.107,13.099-13.107h49.01C283.798,68.758,289.666,74.626,289.666,81.865L289.666,81.865z"/>
|
||||
</g>
|
||||
<path style="fill:#18456D;" d="M276.559,68.758c7.239,0,13.107,5.868,13.107,13.107l0,0c0,7.239-5.868,13.107-13.107,13.107h-49.01
|
||||
c-7.231,0-13.099-5.868-13.099-13.107l0,0"/>
|
||||
<circle style="fill:#B4E7ED;" cx="252.062" cy="152.718" r="14.438"/>
|
||||
<path style="fill:#7BC6C6;" d="M252.062,138.279c7.979,0,14.438,6.467,14.438,14.438s-6.459,14.438-14.438,14.438"/>
|
||||
<circle style="fill:#B4E7ED;" cx="252.062" cy="198.309" r="14.438"/>
|
||||
<path style="fill:#7BC6C6;" d="M252.062,183.871c7.979,0,14.438,6.459,14.438,14.438c0,7.971-6.459,14.438-14.438,14.438"/>
|
||||
<circle style="fill:#B4E7ED;" cx="252.069" cy="14.438" r="14.438"/>
|
||||
<path style="fill:#7BC6C6;" d="M262.262,4.23c5.648,5.64,5.648,14.785,0.016,20.417c-5.64,5.632-14.785,5.632-20.417,0"/>
|
||||
<circle style="fill:#B4E7ED;" cx="252.062" cy="243.893" r="14.438"/>
|
||||
<g>
|
||||
<path style="fill:#7BC6C6;" d="M252.062,229.455c7.979,0,14.438,6.467,14.438,14.438s-6.459,14.438-14.438,14.438"/>
|
||||
<path style="fill:#7BC6C6;" d="M353.319,332.312c0,2.056-1.646,3.71-3.694,3.71h-13.107c-2.048,0-3.71-1.654-3.71-3.71l0,0
|
||||
c0-2.048,1.662-3.702,3.71-3.702h13.107C351.673,328.609,353.319,330.264,353.319,332.312L353.319,332.312z"/>
|
||||
</g>
|
||||
<path style="fill:#FF5B00;" d="M185.194,440.95c-0.457-18.692-14.612-33.705-32.106-33.705c-6.231,0-12.012,2.001-16.951,5.309
|
||||
C150.772,424.432,167.329,433.995,185.194,440.95z"/>
|
||||
<path style="fill:#7BC6C6;" d="M161.225,412.782c5.561,5.569,5.561,14.588,0,20.157l-45.127,45.127
|
||||
c-5.569,5.569-14.588,5.569-20.149,0l0,0c-5.561-5.553-5.561-14.58,0-20.149l45.135-45.127
|
||||
C146.637,407.221,155.656,407.221,161.225,412.782L161.225,412.782z"/>
|
||||
<path style="fill:#8DD8D6;" d="M141.084,412.782l-45.135,45.127c-1,1.008-1.78,2.143-2.41,3.34c0.228,0.276,0.433,0.583,0.693,0.851
|
||||
c5.585,5.569,14.588,5.569,20.157,0l45.127-45.127c1.016-1.008,1.764-2.143,2.41-3.34c-0.236-0.284-0.433-0.583-0.701-0.851
|
||||
C155.656,407.221,146.637,407.221,141.084,412.782z"/>
|
||||
<path style="fill:#25618E;" d="M130.859,485.888c0,10.067-8.153,18.235-18.227,18.235H84.141c-10.059,0-18.235-8.168-18.235-18.235
|
||||
v-11.39c0-10.075,8.176-18.235,18.235-18.235h28.491c10.075,0,18.227,8.16,18.227,18.235V485.888z"/>
|
||||
<path style="fill:#2477AA;" d="M117.752,457.074c-1.638-0.488-3.332-0.819-5.12-0.819H84.141c-10.059,0-18.235,8.16-18.235,18.235
|
||||
v6.018c1.638,0.48,3.332,0.819,5.128,0.819h28.491c10.075,0,18.227-8.16,18.227-18.227V457.074z"/>
|
||||
<path style="fill:#FF7F00;" d="M318.921,440.95c0.465-18.692,14.62-33.705,32.114-33.705c6.223,0,12.012,2.001,16.951,5.309
|
||||
C353.351,424.432,336.786,433.995,318.921,440.95z"/>
|
||||
<path style="fill:#7BC6C6;" d="M342.898,412.782c-5.561,5.569-5.561,14.588,0,20.157l45.127,45.127
|
||||
c5.577,5.569,14.588,5.569,20.157,0l0,0c5.561-5.553,5.561-14.58,0-20.149L363.04,412.79
|
||||
C357.486,407.221,348.459,407.221,342.898,412.782L342.898,412.782z"/>
|
||||
<path style="fill:#8DD8D6;" d="M363.032,412.782l45.143,45.127c1,1.008,1.772,2.143,2.41,3.34c-0.228,0.276-0.433,0.583-0.693,0.851
|
||||
c-5.577,5.569-14.588,5.569-20.165,0L344.6,416.973c-1.016-1.008-1.764-2.143-2.41-3.34c0.236-0.284,0.433-0.583,0.701-0.851
|
||||
C348.459,407.221,357.486,407.221,363.032,412.782z"/>
|
||||
<path style="fill:#25618E;" d="M373.264,485.888c0,10.067,8.153,18.235,18.227,18.235h28.491c10.059,0,18.235-8.168,18.235-18.235
|
||||
v-11.39c0-10.075-8.176-18.235-18.235-18.235h-28.491c-10.075,0-18.227,8.16-18.227,18.235V485.888z"/>
|
||||
<path style="fill:#2477AA;" d="M386.371,457.074c1.638-0.488,3.332-0.819,5.12-0.819h28.491c10.059,0,18.235,8.16,18.235,18.235
|
||||
v6.018c-1.638,0.48-3.332,0.819-5.128,0.819h-28.491c-10.075,0-18.227-8.16-18.227-18.227V457.074z"/>
|
||||
<path style="fill:#B4E7ED;" d="M72.428,230.747c1.788,0.591,3.679,0.985,5.671,0.985h98.013c10.067,0,18.219-8.168,18.219-18.235
|
||||
V95.232C133.152,115.468,86.221,166.896,72.428,230.747z"/>
|
||||
<path style="fill:#7BC6C6;" d="M78.1,231.731h98.013c10.067,0,18.219-8.168,18.219-18.235V95.232"/>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
<g>
|
||||
</g>
|
||||
</svg>
|
||||
|
Before Width: | Height: | Size: 7.1 KiB |
|
Before Width: | Height: | Size: 1.3 KiB |
|
Before Width: | Height: | Size: 2.3 KiB |
|
Before Width: | Height: | Size: 4.4 KiB |
23
charts/capsule/.helmignore
Normal file
@@ -0,0 +1,23 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
28
charts/capsule/Chart.yaml
Normal file
@@ -0,0 +1,28 @@
|
||||
apiVersion: v2
|
||||
type: application
|
||||
description: A Helm chart to deploy the Capsule Operator for easily implementing,
|
||||
  managing, and maintaining multitenancy and access control in Kubernetes.
|
||||
home: https://github.com/clastix/capsule
|
||||
icon: https://github.com/clastix/capsule/raw/master/assets/logo/capsule_small.png
|
||||
keywords:
|
||||
- kubernetes
|
||||
- operator
|
||||
- multi-tenancy
|
||||
- multi-tenant
|
||||
- multitenancy
|
||||
- multitenant
|
||||
- namespace
|
||||
maintainers:
|
||||
- email: hello@clastix.io
|
||||
name: Clastix Labs Team
|
||||
name: capsule
|
||||
sources:
|
||||
- https://github.com/clastix/capsule
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
version: 0.0.10
|
||||
|
||||
# This is the version number of the application being deployed.
|
||||
# This version number should be incremented each time you make changes to the application.
|
||||
appVersion: 0.0.4
|
||||
122
charts/capsule/README.md
Normal file
@@ -0,0 +1,122 @@
|
||||
# Deploying the Capsule Operator
|
||||
|
||||
Use the Capsule Operator for easily implementing, managing, and maintaining multitenancy and access control in Kubernetes.
|
||||
|
||||
## Requirements
|
||||
|
||||
* [Helm 3](https://github.com/helm/helm/releases) is required when installing the Capsule Operator chart. Follow Helm’s official [steps](https://helm.sh/docs/intro/install/) for installing helm on your particular operating system.
|
||||
|
||||
* A Kubernetes cluster 1.16+ with following [Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/) enabled:
|
||||
|
||||
* PodNodeSelector
|
||||
* LimitRanger
|
||||
* ResourceQuota
|
||||
* MutatingAdmissionWebhook
|
||||
* ValidatingAdmissionWebhook
|
||||
|
||||
* A [`kubeconfig`](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) file accessing the Kubernetes cluster with cluster admin permissions.
|
||||
|
||||
## Quick Start
|
||||
|
||||
The Capsule Operator Chart can be used to instantly deploy the Capsule Operator on your Kubernetes cluster.
|
||||
|
||||
1. Add this repository:
|
||||
|
||||
$ helm repo add clastix https://clastix.github.io/charts
|
||||
|
||||
2. Install the Chart:
|
||||
|
||||
$ helm install capsule clastix/capsule -n capsule-system
|
||||
|
||||
3. Show the status:
|
||||
|
||||
$ helm status capsule -n capsule-system
|
||||
|
||||
4. Upgrade the Chart
|
||||
|
||||
$ helm upgrade capsule clastix/capsule -n capsule-system
|
||||
|
||||
5. Uninstall the Chart
|
||||
|
||||
$ helm uninstall capsule -n capsule-system
|
||||
|
||||
## Customize the installation
|
||||
|
||||
There are two methods for specifying overrides of values during chart installation: `--values` and `--set`.
|
||||
|
||||
The `--values` option is the preferred method because it allows you to keep your overrides in a YAML file, rather than specifying them all on the command line. Create a copy of the YAML file `values.yaml` and add your overrides to it.
|
||||
|
||||
Specify your overrides file when you install the chart:
|
||||
|
||||
$ helm install capsule capsule-helm-chart --values myvalues.yaml -n capsule-system
|
||||
|
||||
The values in your overrides file `myvalues.yaml` will override their counterparts in the chart’s values.yaml file. Any values in `values.yaml` that weren’t overridden will keep their defaults.
|
||||
|
||||
If you only need to make minor customizations, you can specify them on the command line by using the `--set` option. For example:
|
||||
|
||||
$ helm install capsule capsule-helm-chart --set force_tenant_prefix=false -n capsule-system
|
||||
|
||||
Here the values you can override:
|
||||
|
||||
Parameter | Description | Default
|
||||
--- | --- | ---
|
||||
`manager.options.logLevel` | Set the log verbosity of the controller with a value from 1 to 10.| `4`
|
||||
`manager.options.forceTenantPrefix` | Boolean, enforces the Tenant owner, during Namespace creation, to name it using the selected Tenant name as prefix, separated by a dash | `false`
|
||||
`manager.options.capsuleUserGroup` | Override the Capsule user group | `capsule.clastix.io`
|
||||
`manager.options.protectedNamespaceRegex` | If specified, disallows creation of namespaces matching the passed regexp | `null`
|
||||
`manager.image.repository` | Set the image repository of the controller. | `quay.io/clastix/capsule`
|
||||
`manager.image.tag` | Overrides the image tag whose default is the chart `appVersion`. | `null`
|
||||
`manager.image.pullPolicy` | Set the image pull policy. | `IfNotPresent`
|
||||
`manager.resources.requests/cpu` | Set the CPU requests assigned to the controller. | `200m`
|
||||
`manager.resources.requests/memory` | Set the memory requests assigned to the controller. | `128Mi`
|
||||
`manager.resources.limits/cpu` | Set the CPU limits assigned to the controller. | `200m`
|
||||
`manager.resources.limits/memory` | Set the memory limits assigned to the controller. | `128Mi`
|
||||
`proxy.image.repository` | Set the image repository of the rbac proxy. | `gcr.io/kubebuilder/kube-rbac-proxy`
|
||||
`proxy.image.tag` | Set the image tag of the rbac proxy. | `v0.5.0`
|
||||
`proxy.image.pullPolicy` | Set the image pull policy. | `IfNotPresent`
|
||||
`proxy.resources.requests/cpu` | Set the CPU requests assigned to the rbac proxy. | `10m`
|
||||
`proxy.resources.requests/memory` | Set the memory requests assigned to the rbac proxy. | `64Mi`
|
||||
`proxy.resources.limits/cpu` | Set the CPU limits assigned to the rbac proxy. | `100m`
|
||||
`proxy.resources.limits/memory` | Set the memory limits assigned to the rbac proxy. | `128Mi`
|
||||
`mutatingWebhooksTimeoutSeconds` | Timeout in seconds for mutating webhooks. | `30`
|
||||
`validatingWebhooksTimeoutSeconds` | Timeout in seconds for validating webhooks. | `30`
|
||||
`imagePullSecrets` | Configuration for `imagePullSecrets` so that you can use a private images registry. | `[]`
|
||||
`serviceAccount.create` | Specifies whether a service account should be created. | `true`
|
||||
`serviceAccount.annotations` | Annotations to add to the service account. | `{}`
|
||||
`serviceAccount.name` | The name of the service account to use. If not set and `serviceAccount.create=true`, a name is generated using the fullname template | `capsule`
|
||||
`podAnnotations` | Annotations to add to the Capsule pod. | `{}`
|
||||
`priorityClassName` | Set the priority class name of the Capsule pod. | `null`
|
||||
`nodeSelector` | Set the node selector for the Capsule pod. | `{}`
|
||||
`tolerations` | Set list of tolerations for the Capsule pod. | `[]`
|
||||
`replicaCount` | Set the replica count for Capsule pod. | `1`
|
||||
`affinity` | Set affinity rules for the Capsule pod. | `{}`
|
||||
`podSecurityPolicy.enabled` | Specify if a Pod Security Policy must be created. | `false`
|
||||
|
||||
## Created resources
|
||||
|
||||
This Helm Chart creates the following Kubernetes resources in the release namespace:
|
||||
|
||||
* Capsule Namespace
|
||||
* Capsule Operator Deployment
|
||||
* Capsule Service
|
||||
* CA Secret
|
||||
* Certificate Secret
|
||||
* Tenant Custom Resource Definition
|
||||
* MutatingWebHookConfiguration
|
||||
* ValidatingWebHookConfiguration
|
||||
* RBAC Cluster Roles
|
||||
* Metrics Service
|
||||
|
||||
And optionally, depending on the values set:
|
||||
|
||||
* Capsule ServiceAccount
|
||||
* PodSecurityPolicy
|
||||
* RBAC ClusterRole and RoleBinding for pod security policy
|
||||
|
||||
## Notes on installing Custom Resource Definitions with Helm3
|
||||
|
||||
Capsule, like many other add-ons, defines its own set of Custom Resource Definitions (CRDs). Helm 3 replaced the old CRD installation method with a simpler approach. In the Helm Chart, there is now a special directory called `crds` to hold the CRDs. These CRDs are not templated, but will be installed by default when running a `helm install` for the chart. If the CRDs already exist (for example, you already executed `helm install`), installation of them is skipped with a warning. If you wish to skip the CRD installation, and not see the warning, you can pass the `--skip-crds` flag to the `helm install` command.
|
||||
|
||||
## More
|
||||
|
||||
See Capsule [use cases](https://github.com/clastix/capsule/blob/master/use_cases.md) for more information about how to use Capsule.
|
||||
823
charts/capsule/crds/tenant-crd.yaml
Normal file
@@ -0,0 +1,823 @@
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.3.0
|
||||
creationTimestamp: null
|
||||
name: tenants.capsule.clastix.io
|
||||
spec:
|
||||
additionalPrinterColumns:
|
||||
- JSONPath: .spec.namespaceQuota
|
||||
description: The max amount of Namespaces can be created
|
||||
name: Namespace quota
|
||||
type: integer
|
||||
- JSONPath: .status.size
|
||||
description: The total amount of Namespaces in use
|
||||
name: Namespace count
|
||||
type: integer
|
||||
- JSONPath: .spec.owner.name
|
||||
description: The assigned Tenant owner
|
||||
name: Owner name
|
||||
type: string
|
||||
- JSONPath: .spec.owner.kind
|
||||
description: The assigned Tenant owner kind
|
||||
name: Owner kind
|
||||
type: string
|
||||
- JSONPath: .spec.nodeSelector
|
||||
description: Node Selector applied to Pods
|
||||
name: Node selector
|
||||
type: string
|
||||
- JSONPath: .metadata.creationTimestamp
|
||||
description: Age
|
||||
name: Age
|
||||
type: date
|
||||
group: capsule.clastix.io
|
||||
names:
|
||||
kind: Tenant
|
||||
listKind: TenantList
|
||||
plural: tenants
|
||||
shortNames:
|
||||
- tnt
|
||||
singular: tenant
|
||||
preserveUnknownFields: false
|
||||
scope: Cluster
|
||||
subresources:
|
||||
status: {}
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
description: Tenant is the Schema for the tenants API
|
||||
properties:
|
||||
apiVersion:
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
spec:
|
||||
description: TenantSpec defines the desired state of Tenant
|
||||
properties:
|
||||
additionalRoleBindings:
|
||||
items:
|
||||
properties:
|
||||
clusterRoleName:
|
||||
type: string
|
||||
subjects:
|
||||
description: kubebuilder:validation:Minimum=1
|
||||
items:
|
||||
description: Subject contains a reference to the object or user
|
||||
identities a role binding applies to. This can either hold
|
||||
a direct API object reference, or a value for non-objects
|
||||
such as user and group names.
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup holds the API group of the referenced
|
||||
subject. Defaults to "" for ServiceAccount subjects. Defaults
|
||||
to "rbac.authorization.k8s.io" for User and Group subjects.
|
||||
type: string
|
||||
kind:
|
||||
description: Kind of object being referenced. Values defined
|
||||
by this API group are "User", "Group", and "ServiceAccount".
|
||||
If the Authorizer does not recognized the kind value,
|
||||
the Authorizer should report an error.
|
||||
type: string
|
||||
name:
|
||||
description: Name of the object being referenced.
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace of the referenced object. If the
|
||||
object kind is non-namespace, such as "User" or "Group",
|
||||
and this value is not empty the Authorizer should report
|
||||
an error.
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
required:
|
||||
- clusterRoleName
|
||||
- subjects
|
||||
type: object
|
||||
type: array
|
||||
containerRegistries:
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
externalServiceIPs:
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- allowed
|
||||
type: object
|
||||
ingressClasses:
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
ingressHostnames:
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
limitRanges:
|
||||
items:
|
||||
description: LimitRangeSpec defines a min/max usage limit for resources
|
||||
that match on kind.
|
||||
properties:
|
||||
limits:
|
||||
description: Limits is the list of LimitRangeItem objects that
|
||||
are enforced.
|
||||
items:
|
||||
description: LimitRangeItem defines a min/max usage limit for
|
||||
any resource that matches on kind.
|
||||
properties:
|
||||
default:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: Default resource requirement limit value by
|
||||
resource name if resource limit is omitted.
|
||||
type: object
|
||||
defaultRequest:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: DefaultRequest is the default resource requirement
|
||||
request value by resource name if resource request is
|
||||
omitted.
|
||||
type: object
|
||||
max:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: Max usage constraints on this kind by resource
|
||||
name.
|
||||
type: object
|
||||
maxLimitRequestRatio:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: MaxLimitRequestRatio if specified, the named
|
||||
resource must have a request and limit that are both non-zero
|
||||
where limit divided by request is less than or equal to
|
||||
the enumerated value; this represents the max burst for
|
||||
the named resource.
|
||||
type: object
|
||||
min:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: Min usage constraints on this kind by resource
|
||||
name.
|
||||
type: object
|
||||
type:
|
||||
description: Type of resource that this limit applies to.
|
||||
type: string
|
||||
required:
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
required:
|
||||
- limits
|
||||
type: object
|
||||
type: array
|
||||
namespaceQuota:
|
||||
format: int32
|
||||
minimum: 1
|
||||
type: integer
|
||||
namespacesMetadata:
|
||||
properties:
|
||||
additionalAnnotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
additionalLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
networkPolicies:
|
||||
items:
|
||||
description: NetworkPolicySpec provides the specification of a NetworkPolicy
|
||||
properties:
|
||||
egress:
|
||||
description: List of egress rules to be applied to the selected
|
||||
pods. Outgoing traffic is allowed if there are no NetworkPolicies
|
||||
selecting the pod (and cluster policy otherwise allows the traffic),
|
||||
OR if the traffic matches at least one egress rule across all
|
||||
of the NetworkPolicy objects whose podSelector matches the pod.
|
||||
If this field is empty then this NetworkPolicy limits all outgoing
|
||||
traffic (and serves solely to ensure that the pods it selects
|
||||
are isolated by default). This field is beta-level in 1.8
|
||||
items:
|
||||
description: NetworkPolicyEgressRule describes a particular
|
||||
set of traffic that is allowed out of pods matched by a NetworkPolicySpec's
|
||||
podSelector. The traffic must match both ports and to. This
|
||||
type is beta-level in 1.8
|
||||
properties:
|
||||
ports:
|
||||
description: List of destination ports for outgoing traffic.
|
||||
Each item in this list is combined using a logical OR.
|
||||
If this field is empty or missing, this rule matches all
|
||||
ports (traffic not restricted by port). If this field
|
||||
is present and contains at least one item, then this rule
|
||||
allows traffic only if the traffic matches at least one
|
||||
port in the list.
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow
|
||||
traffic on
|
||||
properties:
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This
|
||||
can either be a numerical or named port on a pod.
|
||||
If this field is not provided, this matches all
|
||||
port names and numbers.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
description: The protocol (TCP, UDP, or SCTP) which
|
||||
traffic must match. If not specified, this field
|
||||
defaults to TCP.
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
to:
|
||||
description: List of destinations for outgoing traffic of
|
||||
pods selected for this rule. Items in this list are combined
|
||||
using a logical OR operation. If this field is empty or
|
||||
missing, this rule matches all destinations (traffic not
|
||||
restricted by destination). If this field is present and
|
||||
contains at least one item, this rule allows traffic only
|
||||
if the traffic matches at least one item in the to list.
|
||||
items:
|
||||
description: NetworkPolicyPeer describes a peer to allow
|
||||
traffic to/from. Only certain combinations of fields
|
||||
are allowed
|
||||
properties:
|
||||
ipBlock:
|
||||
description: IPBlock defines policy on a particular
|
||||
IPBlock. If this field is set then neither of the
|
||||
other fields can be.
|
||||
properties:
|
||||
cidr:
|
||||
description: CIDR is a string representing the
|
||||
IP Block Valid examples are "192.168.1.1/24"
|
||||
or "2001:db9::/64"
|
||||
type: string
|
||||
except:
|
||||
description: Except is a slice of CIDRs that should
|
||||
not be included within an IP Block Valid examples
|
||||
are "192.168.1.1/24" or "2001:db9::/64" Except
|
||||
values will be rejected if they are outside
|
||||
the CIDR range
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- cidr
|
||||
type: object
|
||||
namespaceSelector:
|
||||
description: "Selects Namespaces using cluster-scoped
|
||||
labels. This field follows standard label selector
|
||||
semantics; if present but empty, it selects all
|
||||
namespaces. \n If PodSelector is also set, then
|
||||
the NetworkPolicyPeer as a whole selects the Pods
|
||||
matching PodSelector in the Namespaces selected
|
||||
by NamespaceSelector. Otherwise it selects all Pods
|
||||
in the Namespaces selected by NamespaceSelector."
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label
|
||||
selector requirements. The requirements are
|
||||
ANDed.
|
||||
items:
|
||||
description: A label selector requirement is
|
||||
a selector that contains values, a key, and
|
||||
an operator that relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the
|
||||
selector applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: operator represents a key's
|
||||
relationship to a set of values. Valid
|
||||
operators are In, NotIn, Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: values is an array of string
|
||||
values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If
|
||||
the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array
|
||||
is replaced during a strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: matchLabels is a map of {key,value}
|
||||
pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions,
|
||||
whose key field is "key", the operator is "In",
|
||||
and the values array contains only "value".
|
||||
The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
podSelector:
|
||||
description: "This is a label selector which selects
|
||||
Pods. This field follows standard label selector
|
||||
semantics; if present but empty, it selects all
|
||||
pods. \n If NamespaceSelector is also set, then
|
||||
the NetworkPolicyPeer as a whole selects the Pods
|
||||
matching PodSelector in the Namespaces selected
|
||||
by NamespaceSelector. Otherwise it selects the Pods
|
||||
matching PodSelector in the policy's own Namespace."
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label
|
||||
selector requirements. The requirements are
|
||||
ANDed.
|
||||
items:
|
||||
description: A label selector requirement is
|
||||
a selector that contains values, a key, and
|
||||
an operator that relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the
|
||||
selector applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: operator represents a key's
|
||||
relationship to a set of values. Valid
|
||||
operators are In, NotIn, Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: values is an array of string
|
||||
values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If
|
||||
the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array
|
||||
is replaced during a strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: matchLabels is a map of {key,value}
|
||||
pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions,
|
||||
whose key field is "key", the operator is "In",
|
||||
and the values array contains only "value".
|
||||
The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
type: array
|
||||
ingress:
|
||||
description: List of ingress rules to be applied to the selected
|
||||
pods. Traffic is allowed to a pod if there are no NetworkPolicies
|
||||
selecting the pod (and cluster policy otherwise allows the traffic),
|
||||
OR if the traffic source is the pod's local node, OR if the
|
||||
traffic matches at least one ingress rule across all of the
|
||||
NetworkPolicy objects whose podSelector matches the pod. If
|
||||
this field is empty then this NetworkPolicy does not allow any
|
||||
traffic (and serves solely to ensure that the pods it selects
|
||||
are isolated by default)
|
||||
items:
|
||||
description: NetworkPolicyIngressRule describes a particular
|
||||
set of traffic that is allowed to the pods matched by a NetworkPolicySpec's
|
||||
podSelector. The traffic must match both ports and from.
|
||||
properties:
|
||||
from:
|
||||
description: List of sources which should be able to access
|
||||
the pods selected for this rule. Items in this list are
|
||||
combined using a logical OR operation. If this field is
|
||||
empty or missing, this rule matches all sources (traffic
|
||||
not restricted by source). If this field is present and
|
||||
contains at least one item, this rule allows traffic only
|
||||
if the traffic matches at least one item in the from list.
|
||||
items:
|
||||
description: NetworkPolicyPeer describes a peer to allow
|
||||
traffic to/from. Only certain combinations of fields
|
||||
are allowed
|
||||
properties:
|
||||
ipBlock:
|
||||
description: IPBlock defines policy on a particular
|
||||
IPBlock. If this field is set then neither of the
|
||||
other fields can be.
|
||||
properties:
|
||||
cidr:
|
||||
description: CIDR is a string representing the
|
||||
IP Block Valid examples are "192.168.1.1/24"
|
||||
or "2001:db9::/64"
|
||||
type: string
|
||||
except:
|
||||
description: Except is a slice of CIDRs that should
|
||||
not be included within an IP Block Valid examples
|
||||
are "192.168.1.1/24" or "2001:db9::/64" Except
|
||||
values will be rejected if they are outside
|
||||
the CIDR range
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- cidr
|
||||
type: object
|
||||
namespaceSelector:
|
||||
description: "Selects Namespaces using cluster-scoped
|
||||
labels. This field follows standard label selector
|
||||
semantics; if present but empty, it selects all
|
||||
namespaces. \n If PodSelector is also set, then
|
||||
the NetworkPolicyPeer as a whole selects the Pods
|
||||
matching PodSelector in the Namespaces selected
|
||||
by NamespaceSelector. Otherwise it selects all Pods
|
||||
in the Namespaces selected by NamespaceSelector."
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label
|
||||
selector requirements. The requirements are
|
||||
ANDed.
|
||||
items:
|
||||
description: A label selector requirement is
|
||||
a selector that contains values, a key, and
|
||||
an operator that relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the
|
||||
selector applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: operator represents a key's
|
||||
relationship to a set of values. Valid
|
||||
operators are In, NotIn, Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: values is an array of string
|
||||
values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If
|
||||
the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array
|
||||
is replaced during a strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: matchLabels is a map of {key,value}
|
||||
pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions,
|
||||
whose key field is "key", the operator is "In",
|
||||
and the values array contains only "value".
|
||||
The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
podSelector:
|
||||
description: "This is a label selector which selects
|
||||
Pods. This field follows standard label selector
|
||||
semantics; if present but empty, it selects all
|
||||
pods. \n If NamespaceSelector is also set, then
|
||||
the NetworkPolicyPeer as a whole selects the Pods
|
||||
matching PodSelector in the Namespaces selected
|
||||
by NamespaceSelector. Otherwise it selects the Pods
|
||||
matching PodSelector in the policy's own Namespace."
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label
|
||||
selector requirements. The requirements are
|
||||
ANDed.
|
||||
items:
|
||||
description: A label selector requirement is
|
||||
a selector that contains values, a key, and
|
||||
an operator that relates the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the
|
||||
selector applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: operator represents a key's
|
||||
relationship to a set of values. Valid
|
||||
operators are In, NotIn, Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: values is an array of string
|
||||
values. If the operator is In or NotIn,
|
||||
the values array must be non-empty. If
|
||||
the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array
|
||||
is replaced during a strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: matchLabels is a map of {key,value}
|
||||
pairs. A single {key,value} in the matchLabels
|
||||
map is equivalent to an element of matchExpressions,
|
||||
whose key field is "key", the operator is "In",
|
||||
and the values array contains only "value".
|
||||
The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
ports:
|
||||
description: List of ports which should be made accessible
|
||||
on the pods selected for this rule. Each item in this
|
||||
list is combined using a logical OR. If this field is
|
||||
empty or missing, this rule matches all ports (traffic
|
||||
not restricted by port). If this field is present and
|
||||
contains at least one item, then this rule allows traffic
|
||||
only if the traffic matches at least one port in the list.
|
||||
items:
|
||||
description: NetworkPolicyPort describes a port to allow
|
||||
traffic on
|
||||
properties:
|
||||
port:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
description: The port on the given protocol. This
|
||||
can either be a numerical or named port on a pod.
|
||||
If this field is not provided, this matches all
|
||||
port names and numbers.
|
||||
x-kubernetes-int-or-string: true
|
||||
protocol:
|
||||
description: The protocol (TCP, UDP, or SCTP) which
|
||||
traffic must match. If not specified, this field
|
||||
defaults to TCP.
|
||||
type: string
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
type: array
|
||||
podSelector:
|
||||
description: Selects the pods to which this NetworkPolicy object
|
||||
applies. The array of ingress rules is applied to any pods selected
|
||||
by this field. Multiple network policies can select the same
|
||||
set of pods. In this case, the ingress rules for each are combined
|
||||
additively. This field is NOT optional and follows standard
|
||||
label selector semantics. An empty podSelector matches all pods
|
||||
in this namespace.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: matchExpressions is a list of label selector
|
||||
requirements. The requirements are ANDed.
|
||||
items:
|
||||
description: A label selector requirement is a selector
|
||||
that contains values, a key, and an operator that relates
|
||||
the key and values.
|
||||
properties:
|
||||
key:
|
||||
description: key is the label key that the selector
|
||||
applies to.
|
||||
type: string
|
||||
operator:
|
||||
description: operator represents a key's relationship
|
||||
to a set of values. Valid operators are In, NotIn,
|
||||
Exists and DoesNotExist.
|
||||
type: string
|
||||
values:
|
||||
description: values is an array of string values. If
|
||||
the operator is In or NotIn, the values array must
|
||||
be non-empty. If the operator is Exists or DoesNotExist,
|
||||
the values array must be empty. This array is replaced
|
||||
during a strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- key
|
||||
- operator
|
||||
type: object
|
||||
type: array
|
||||
matchLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: matchLabels is a map of {key,value} pairs. A
|
||||
single {key,value} in the matchLabels map is equivalent
|
||||
to an element of matchExpressions, whose key field is "key",
|
||||
the operator is "In", and the values array contains only
|
||||
"value". The requirements are ANDed.
|
||||
type: object
|
||||
type: object
|
||||
policyTypes:
|
||||
description: List of rule types that the NetworkPolicy relates
|
||||
to. Valid options are "Ingress", "Egress", or "Ingress,Egress".
|
||||
If this field is not specified, it will default based on the
|
||||
existence of Ingress or Egress rules; policies that contain
|
||||
an Egress section are assumed to affect Egress, and all policies
|
||||
(whether or not they contain an Ingress section) are assumed
|
||||
to affect Ingress. If you want to write an egress-only policy,
|
||||
you must explicitly specify policyTypes [ "Egress" ]. Likewise,
|
||||
if you want to write a policy that specifies that no egress
|
||||
is allowed, you must specify a policyTypes value that include
|
||||
"Egress" (since such a policy would not include an Egress section
|
||||
and would otherwise default to just [ "Ingress" ]). This field
|
||||
is beta-level in 1.8
|
||||
items:
|
||||
description: Policy Type string describes the NetworkPolicy
|
||||
type This type is beta-level in 1.8
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- podSelector
|
||||
type: object
|
||||
type: array
|
||||
nodeSelector:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
owner:
|
||||
description: OwnerSpec defines tenant owner name and kind
|
||||
properties:
|
||||
kind:
|
||||
enum:
|
||||
- User
|
||||
- Group
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
type: object
|
||||
resourceQuotas:
|
||||
items:
|
||||
description: ResourceQuotaSpec defines the desired hard limits to
|
||||
enforce for Quota.
|
||||
properties:
|
||||
hard:
|
||||
additionalProperties:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: string
|
||||
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
|
||||
x-kubernetes-int-or-string: true
|
||||
description: 'hard is the set of desired hard limits for each
|
||||
named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/'
|
||||
type: object
|
||||
scopeSelector:
|
||||
description: scopeSelector is also a collection of filters like
|
||||
scopes that must match each object tracked by a quota but expressed
|
||||
using ScopeSelectorOperator in combination with possible values.
|
||||
For a resource to match, both scopes AND scopeSelector (if specified
|
||||
in spec), must be matched.
|
||||
properties:
|
||||
matchExpressions:
|
||||
description: A list of scope selector requirements by scope
|
||||
of the resources.
|
||||
items:
|
||||
description: A scoped-resource selector requirement is a
|
||||
selector that contains values, a scope name, and an operator
|
||||
that relates the scope name and values.
|
||||
properties:
|
||||
operator:
|
||||
description: Represents a scope's relationship to a
|
||||
set of values. Valid operators are In, NotIn, Exists,
|
||||
DoesNotExist.
|
||||
type: string
|
||||
scopeName:
|
||||
description: The name of the scope that the selector
|
||||
applies to.
|
||||
type: string
|
||||
values:
|
||||
description: An array of string values. If the operator
|
||||
is In or NotIn, the values array must be non-empty.
|
||||
If the operator is Exists or DoesNotExist, the values
|
||||
array must be empty. This array is replaced during
|
||||
a strategic merge patch.
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- operator
|
||||
- scopeName
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
scopes:
|
||||
description: A collection of filters that must match each object
|
||||
tracked by a quota. If not specified, the quota matches all
|
||||
objects.
|
||||
items:
|
||||
description: A ResourceQuotaScope defines a filter that must
|
||||
match each object tracked by a quota
|
||||
type: string
|
||||
type: array
|
||||
type: object
|
||||
type: array
|
||||
servicesMetadata:
|
||||
properties:
|
||||
additionalAnnotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
additionalLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
storageClasses:
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- owner
|
||||
type: object
|
||||
status:
|
||||
description: TenantStatus defines the observed state of Tenant
|
||||
properties:
|
||||
namespaces:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
size:
|
||||
type: integer
|
||||
required:
|
||||
- size
|
||||
type: object
|
||||
type: object
|
||||
version: v1alpha1
|
||||
versions:
|
||||
- name: v1alpha1
|
||||
served: true
|
||||
storage: true
|
||||
status:
|
||||
acceptedNames:
|
||||
kind: ""
|
||||
plural: ""
|
||||
conditions: []
|
||||
storedVersions: []
|
||||
19
charts/capsule/templates/NOTES.txt
Normal file
@@ -0,0 +1,19 @@
|
||||
- Capsule Operator Helm Chart deployed:
|
||||
|
||||
# Check the capsule logs
|
||||
$ kubectl logs -f deployment/{{ template "capsule.fullname" . }}-controller-manager -c manager -n {{ .Release.Namespace }}
|
||||
|
||||
|
||||
# Check the capsule logs
|
||||
$ kubectl logs -f deployment/{{ template "capsule.fullname" . }}-controller-manager -c manager -n{{ .Release.Namespace }}
|
||||
|
||||
- Manage this chart:
|
||||
|
||||
# Upgrade Capsule
|
||||
$ helm upgrade {{ .Release.Name }} -f <values.yaml> capsule -n {{ .Release.Namespace }}
|
||||
|
||||
# Show this status again
|
||||
$ helm status {{ .Release.Name }} -n {{ .Release.Namespace }}
|
||||
|
||||
# Uninstall Capsule
|
||||
$ helm uninstall {{ .Release.Name }} -n {{ .Release.Namespace }}
|
||||
90
charts/capsule/templates/_helpers.tpl
Normal file
@@ -0,0 +1,90 @@
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "capsule.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "capsule.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "capsule.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "capsule.labels" -}}
|
||||
helm.sh/chart: {{ include "capsule.chart" . }}
|
||||
{{ include "capsule.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "capsule.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "capsule.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "capsule.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "capsule.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the fully-qualified Docker image to use
|
||||
*/}}
|
||||
{{- define "capsule.fullyQualifiedDockerImage" -}}
|
||||
{{- printf "%s:%s" .Values.manager.image.repository ( .Values.manager.image.tag | default (printf "v%s" .Chart.AppVersion) ) -}}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the Capsule Deployment name to use
|
||||
*/}}
|
||||
{{- define "capsule.deploymentName" -}}
|
||||
{{- printf "%s-controller-manager" (include "capsule.fullname" .) -}}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the Capsule CA Secret name to use
|
||||
*/}}
|
||||
{{- define "capsule.secretCaName" -}}
|
||||
{{- printf "%s-ca" (include "capsule.fullname" .) -}}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the Capsule TLS Secret name to use
|
||||
*/}}
|
||||
{{- define "capsule.secretTlsName" -}}
|
||||
{{- printf "%s-tls" (include "capsule.fullname" .) -}}
|
||||
{{- end }}
|
||||
10
charts/capsule/templates/ca.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
annotations:
|
||||
"helm.sh/hook": "pre-install"
|
||||
"helm.sh/hook-delete-policy": "before-hook-creation"
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
name: {{ include "capsule.secretCaName" . }}
|
||||
data:
|
||||
10
charts/capsule/templates/certs.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
annotations:
|
||||
"helm.sh/hook": "pre-install"
|
||||
"helm.sh/hook-delete-policy": "before-hook-creation"
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
name: {{ include "capsule.secretTlsName" . }}
|
||||
data:
|
||||
97
charts/capsule/templates/deployment.yaml
Normal file
@@ -0,0 +1,97 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "capsule.deploymentName" . }}
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
spec:
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "capsule.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "capsule.selectorLabels" . | nindent 8 }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "capsule.serviceAccountName" . }}
|
||||
priorityClassName: {{ .Values.priorityClassName }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
volumes:
|
||||
- name: cert
|
||||
secret:
|
||||
defaultMode: 420
|
||||
secretName: {{ include "capsule.fullname" . }}-tls
|
||||
containers:
|
||||
- name: manager
|
||||
command:
|
||||
- /manager
|
||||
args:
|
||||
- --metrics-addr=127.0.0.1:8080
|
||||
- --enable-leader-election
|
||||
- --zap-log-level={{ default 4 .Values.manager.options.logLevel }}
|
||||
{{ if .Values.manager.options.forceTenantPrefix }}- --force-tenant-prefix={{ .Values.manager.options.forceTenantPrefix }}{{ end }}
|
||||
{{ if .Values.manager.options.capsuleUserGroup }}- --capsule-user-group={{ .Values.manager.options.capsuleUserGroup }}{{ end }}
|
||||
{{ if .Values.manager.options.protectedNamespaceRegex }}- --protected-namespace-regex={{ .Values.manager.options.protectedNamespaceRegex }}{{ end }}
|
||||
image: {{ include "capsule.fullyQualifiedDockerImage" . }}
|
||||
imagePullPolicy: {{ .Values.manager.image.pullPolicy }}
|
||||
env:
|
||||
- name: NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
ports:
|
||||
- name: webhook-server
|
||||
containerPort: 9443
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10080
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readyz
|
||||
port: 10080
|
||||
volumeMounts:
|
||||
- mountPath: /tmp/k8s-webhook-server/serving-certs
|
||||
name: cert
|
||||
readOnly: true
|
||||
resources:
|
||||
{{- toYaml .Values.manager.resources | nindent 12 }}
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
- name: kube-rbac-proxy
|
||||
image: {{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}
|
||||
imagePullPolicy: IfNotPresent
|
||||
args:
|
||||
- --secure-listen-address=0.0.0.0:8443
|
||||
- --upstream=http://127.0.0.1:8080/
|
||||
- --logtostderr=true
|
||||
- --v=10
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
name: https
|
||||
protocol: TCP
|
||||
resources:
|
||||
{{- toYaml .Values.proxy.resources | nindent 12 }}
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
16
charts/capsule/templates/metrics-service.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "capsule.fullname" . }}-controller-manager-metrics-service
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
spec:
|
||||
ports:
|
||||
- port: 8443
|
||||
name: https
|
||||
protocol: TCP
|
||||
targetPort: https
|
||||
selector:
|
||||
{{- include "capsule.selectorLabels" . | nindent 4 }}
|
||||
sessionAffinity: None
|
||||
type: ClusterIP
|
||||
34
charts/capsule/templates/mutatingwebhookconfiguration.yaml
Normal file
@@ -0,0 +1,34 @@
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: MutatingWebhookConfiguration
|
||||
metadata:
|
||||
name: {{ include "capsule.fullname" . }}-mutating-webhook-configuration
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1beta1
|
||||
clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: {{ include "capsule.fullname" . }}-webhook-service
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /mutate-v1-namespace-owner-reference
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
matchPolicy: Exact
|
||||
name: owner.namespace.capsule.clastix.io
|
||||
namespaceSelector: {}
|
||||
objectSelector: {}
|
||||
reinvocationPolicy: Never
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
resources:
|
||||
- namespaces
|
||||
scope: '*'
|
||||
sideEffects: NoneOnDryRun
|
||||
timeoutSeconds: {{ .Values.mutatingWebhooksTimeoutSeconds }}
|
||||
54
charts/capsule/templates/podsecuritypolicy.yaml
Normal file
@@ -0,0 +1,54 @@
|
||||
{{- if .Values.podSecurityPolicy.enabled }}
|
||||
kind: PodSecurityPolicy
|
||||
apiVersion: policy/v1beta1
|
||||
metadata:
|
||||
name: {{ include "capsule.fullname" . }}
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
spec:
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
hostPorts:
|
||||
- max: 0
|
||||
min: 0
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: RunAsAny
|
||||
volumes:
|
||||
- secret
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "capsule.fullname" . }}-use-psp
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
resourceNames:
|
||||
- {{ include "capsule.fullname" . }}
|
||||
verbs:
|
||||
- use
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "capsule.fullname" . }}-use-psp
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ include "capsule.fullname" . }}-use-psp
|
||||
subjects:
|
||||
- apiGroup: ""
|
||||
kind: ServiceAccount
|
||||
name: {{ include "capsule.serviceAccountName" . }}
|
||||
{{- end }}
|
||||
38
charts/capsule/templates/post-install-job.yaml
Normal file
@@ -0,0 +1,38 @@
|
||||
{{- $cmd := "while [ -z $$(kubectl -n $NAMESPACE get secret capsule-tls -o jsonpath='{.data.tls\\\\.crt}') ];" -}}
|
||||
{{- $cmd = printf "%s do echo 'waiting Capsule to be up and running...' && sleep 5;" $cmd -}}
|
||||
{{- $cmd = printf "%s done" $cmd -}}
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}"
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name | quote }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion }}
|
||||
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
annotations:
|
||||
# This is what defines this resource as a hook. Without this line, the
|
||||
# job is considered part of the release.
|
||||
"helm.sh/hook": post-install
|
||||
"helm.sh/hook-weight": "-5"
|
||||
"helm.sh/hook-delete-policy": hook-succeeded
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}"
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name | quote }}
|
||||
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
spec:
|
||||
restartPolicy: Never
|
||||
containers:
|
||||
- name: post-install-job
|
||||
image: "bitnami/kubectl:1.18"
|
||||
command: ["sh", "-c", "{{ $cmd }}"]
|
||||
env:
|
||||
- name: NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
serviceAccountName: {{ include "capsule.serviceAccountName" . }}
|
||||
39
charts/capsule/templates/pre-delete-job.yaml
Normal file
@@ -0,0 +1,39 @@
|
||||
{{- $cmd := printf "kubectl scale deployment -n $NAMESPACE %s --replicas 0 &&" (include "capsule.deploymentName" .) -}}
|
||||
{{- $cmd = printf "%s kubectl delete secret -n $NAMESPACE %s %s --ignore-not-found &&" $cmd (include "capsule.secretTlsName" .) (include "capsule.secretCaName" .) -}}
|
||||
{{- $cmd = printf "%s kubectl delete clusterroles.rbac.authorization.k8s.io capsule-namespace-deleter capsule-namespace-provisioner --ignore-not-found &&" $cmd -}}
|
||||
{{- $cmd = printf "%s kubectl delete clusterrolebindings.rbac.authorization.k8s.io capsule-namespace-provisioner --ignore-not-found" $cmd -}}
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}"
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name | quote }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion }}
|
||||
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
annotations:
|
||||
# This is what defines this resource as a hook. Without this line, the
|
||||
# job is considered part of the release.
|
||||
"helm.sh/hook": pre-delete
|
||||
"helm.sh/hook-weight": "-5"
|
||||
"helm.sh/hook-delete-policy": hook-succeeded
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
name: "{{ .Release.Name }}"
|
||||
labels:
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name | quote }}
|
||||
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
|
||||
spec:
|
||||
restartPolicy: Never
|
||||
containers:
|
||||
- name: pre-delete-job
|
||||
image: "bitnami/kubectl:1.18"
|
||||
command: [ "sh", "-c", "{{ $cmd }}"]
|
||||
env:
|
||||
- name: NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
serviceAccountName: {{ include "capsule.serviceAccountName" . }}
|
||||
61
charts/capsule/templates/rbac.yaml
Normal file
@@ -0,0 +1,61 @@
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "capsule.fullname" . }}-proxy-role
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- authentication.k8s.io
|
||||
resources:
|
||||
- tokenreviews
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- authorization.k8s.io
|
||||
resources:
|
||||
- subjectaccessreviews
|
||||
verbs:
|
||||
- create
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "capsule.fullname" . }}-metrics-reader
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
rules:
|
||||
- nonResourceURLs:
|
||||
- /metrics
|
||||
verbs:
|
||||
- get
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "capsule.fullname" . }}-proxy-rolebinding
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ include "capsule.fullname" . }}-proxy-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "capsule.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: {{ include "capsule.fullname" . }}-manager-rolebinding
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "capsule.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
12
charts/capsule/templates/serviceaccount.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "capsule.serviceAccountName" . }}
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
266
charts/capsule/templates/validatingwebhookconfiguration.yaml
Normal file
@@ -0,0 +1,266 @@
|
||||
apiVersion: admissionregistration.k8s.io/v1
|
||||
kind: ValidatingWebhookConfiguration
|
||||
metadata:
|
||||
name: {{ include "capsule.fullname" . }}-validating-webhook-configuration
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
webhooks:
|
||||
- admissionReviewVersions:
|
||||
- v1beta1
|
||||
clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: {{ include "capsule.fullname" . }}-webhook-service
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /validating-ingress
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
matchPolicy: Exact
|
||||
name: ingress-v1beta1.capsule.clastix.io
|
||||
namespaceSelector:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
objectSelector: {}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
- extensions
|
||||
apiVersions:
|
||||
- v1beta1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- ingresses
|
||||
scope: '*'
|
||||
sideEffects: NoneOnDryRun
|
||||
timeoutSeconds: {{ .Values.validatingWebhooksTimeoutSeconds }}
|
||||
- admissionReviewVersions:
|
||||
- v1beta1
|
||||
clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: {{ include "capsule.fullname" . }}-webhook-service
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /validating-ingress
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
matchPolicy: Exact
|
||||
name: ingress-v1.capsule.clastix.io
|
||||
namespaceSelector:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
objectSelector: {}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- ingresses
|
||||
scope: '*'
|
||||
sideEffects: NoneOnDryRun
|
||||
timeoutSeconds: {{ .Values.validatingWebhooksTimeoutSeconds }}
|
||||
- admissionReviewVersions:
|
||||
- v1beta1
|
||||
clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: {{ include "capsule.fullname" . }}-webhook-service
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /validate-v1-namespace-quota
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
matchPolicy: Exact
|
||||
name: quota.namespace.capsule.clastix.io
|
||||
namespaceSelector: {}
|
||||
objectSelector: {}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
resources:
|
||||
- namespaces
|
||||
scope: '*'
|
||||
sideEffects: NoneOnDryRun
|
||||
timeoutSeconds: {{ .Values.validatingWebhooksTimeoutSeconds }}
|
||||
- admissionReviewVersions:
|
||||
- v1beta1
|
||||
clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: {{ include "capsule.fullname" . }}-webhook-service
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /validating-v1-network-policy
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
matchPolicy: Exact
|
||||
name: validating.network-policy.capsule.clastix.io
|
||||
namespaceSelector:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
objectSelector: {}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
- DELETE
|
||||
resources:
|
||||
- networkpolicies
|
||||
scope: '*'
|
||||
sideEffects: NoneOnDryRun
|
||||
timeoutSeconds: {{ .Values.validatingWebhooksTimeoutSeconds }}
|
||||
- admissionReviewVersions:
|
||||
- v1beta1
|
||||
clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: {{ include "capsule.fullname" . }}-webhook-service
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /validating-v1-pvc
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
matchPolicy: Exact
|
||||
name: pvc.capsule.clastix.io
|
||||
namespaceSelector:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
objectSelector: {}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
scope: '*'
|
||||
sideEffects: NoneOnDryRun
|
||||
timeoutSeconds: {{ .Values.validatingWebhooksTimeoutSeconds }}
|
||||
- admissionReviewVersions:
|
||||
- v1beta1
|
||||
clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: {{ include "capsule.fullname" . }}-webhook-service
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /validating-v1-tenant
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
matchPolicy: Exact
|
||||
name: tenant.capsule.clastix.io
|
||||
namespaceSelector: {}
|
||||
objectSelector: {}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- capsule.clastix.io
|
||||
apiVersions:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- tenants
|
||||
scope: '*'
|
||||
sideEffects: NoneOnDryRun
|
||||
timeoutSeconds: {{ .Values.validatingWebhooksTimeoutSeconds }}
|
||||
- admissionReviewVersions:
|
||||
- v1beta1
|
||||
clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: {{ include "capsule.fullname" . }}-webhook-service
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /validating-v1-namespace-tenant-prefix
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
matchPolicy: Exact
|
||||
name: prefix.namespace.capsule.clastix.io
|
||||
namespaceSelector: {}
|
||||
objectSelector: {}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
resources:
|
||||
- namespaces
|
||||
scope: '*'
|
||||
sideEffects: NoneOnDryRun
|
||||
timeoutSeconds: {{ .Values.validatingWebhooksTimeoutSeconds }}
|
||||
- admissionReviewVersions:
|
||||
- v1beta1
|
||||
clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: {{ include "capsule.fullname" . }}-webhook-service
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /validating-v1-registry
|
||||
port: 443
|
||||
failurePolicy: Ignore
|
||||
matchPolicy: Exact
|
||||
name: pod.capsule.clastix.io
|
||||
namespaceSelector:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
objectSelector: {}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
resources:
|
||||
- pods
|
||||
scope: '*'
|
||||
sideEffects: NoneOnDryRun
|
||||
timeoutSeconds: {{ .Values.validatingWebhooksTimeoutSeconds }}
|
||||
- admissionReviewVersions:
|
||||
- v1beta1
|
||||
clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: {{ include "capsule.fullname" . }}-webhook-service
|
||||
namespace: {{ .Release.Namespace }}
|
||||
path: /validating-external-service-ips
|
||||
port: 443
|
||||
failurePolicy: Fail
|
||||
matchPolicy: Exact
|
||||
name: validating-external-service-ips.capsule.clastix.io
|
||||
namespaceSelector:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
objectSelector: {}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- services
|
||||
scope: '*'
|
||||
sideEffects: NoneOnDryRun
|
||||
timeoutSeconds: {{ .Values.validatingWebhooksTimeoutSeconds }}
|
||||
16
charts/capsule/templates/webhook-service.yaml
Normal file
@@ -0,0 +1,16 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "capsule.fullname" . }}-webhook-service
|
||||
labels:
|
||||
{{- include "capsule.labels" . | nindent 4 }}
|
||||
spec:
|
||||
ports:
|
||||
- port: 443
|
||||
name: https
|
||||
protocol: TCP
|
||||
targetPort: 9443
|
||||
selector:
|
||||
{{- include "capsule.selectorLabels" . | nindent 4 }}
|
||||
sessionAffinity: None
|
||||
type: ClusterIP
|
||||
54
charts/capsule/values.yaml
Normal file
@@ -0,0 +1,54 @@
|
||||
# Default values for capsule.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
manager:
|
||||
image:
|
||||
repository: quay.io/clastix/capsule
|
||||
pullPolicy: IfNotPresent
|
||||
tag: ''
|
||||
# Additional Capsule options
|
||||
options:
|
||||
logLevel: '4'
|
||||
forceTenantPrefix:
|
||||
capsuleUserGroup:
|
||||
protectedNamespaceRegex:
|
||||
resources:
|
||||
limits:
|
||||
cpu: 200m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 200m
|
||||
memory: 128Mi
|
||||
proxy:
|
||||
image:
|
||||
repository: gcr.io/kubebuilder/kube-rbac-proxy
|
||||
pullPolicy: IfNotPresent
|
||||
tag: "v0.5.0"
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 64Mi
|
||||
mutatingWebhooksTimeoutSeconds: 30
|
||||
validatingWebhooksTimeoutSeconds: 30
|
||||
imagePullSecrets: []
|
||||
serviceAccount:
|
||||
create: true
|
||||
annotations: {}
|
||||
name: "capsule"
|
||||
podAnnotations: {}
|
||||
priorityClassName: '' #system-cluster-critical
|
||||
nodeSelector: {}
|
||||
# node-role.kubernetes.io/master: ""
|
||||
tolerations: []
|
||||
#- key: CriticalAddonsOnly
|
||||
# operator: Exists
|
||||
#- effect: NoSchedule
|
||||
# key: node-role.kubernetes.io/master
|
||||
replicaCount: 1
|
||||
affinity: {}
|
||||
podSecurityPolicy:
|
||||
enabled: false
|
||||
@@ -25,6 +25,10 @@ spec:
|
||||
description: The assigned Tenant owner kind
|
||||
name: Owner kind
|
||||
type: string
|
||||
- JSONPath: .spec.nodeSelector
|
||||
description: Node Selector applied to Pods
|
||||
name: Node selector
|
||||
type: string
|
||||
- JSONPath: .metadata.creationTimestamp
|
||||
description: Age
|
||||
name: Age
|
||||
@@ -37,6 +41,7 @@ spec:
|
||||
shortNames:
|
||||
- tnt
|
||||
singular: tenant
|
||||
preserveUnknownFields: false
|
||||
scope: Cluster
|
||||
subresources:
|
||||
status: {}
|
||||
@@ -59,19 +64,85 @@ spec:
|
||||
spec:
|
||||
description: TenantSpec defines the desired state of Tenant
|
||||
properties:
|
||||
additionalRoleBindings:
|
||||
items:
|
||||
properties:
|
||||
clusterRoleName:
|
||||
type: string
|
||||
subjects:
|
||||
description: kubebuilder:validation:Minimum=1
|
||||
items:
|
||||
description: Subject contains a reference to the object or user
|
||||
identities a role binding applies to. This can either hold
|
||||
a direct API object reference, or a value for non-objects
|
||||
such as user and group names.
|
||||
properties:
|
||||
apiGroup:
|
||||
description: APIGroup holds the API group of the referenced
|
||||
subject. Defaults to "" for ServiceAccount subjects. Defaults
|
||||
to "rbac.authorization.k8s.io" for User and Group subjects.
|
||||
type: string
|
||||
kind:
|
||||
description: Kind of object being referenced. Values defined
|
||||
by this API group are "User", "Group", and "ServiceAccount".
|
||||
If the Authorizer does not recognized the kind value,
|
||||
the Authorizer should report an error.
|
||||
type: string
|
||||
name:
|
||||
description: Name of the object being referenced.
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace of the referenced object. If the
|
||||
object kind is non-namespace, such as "User" or "Group",
|
||||
and this value is not empty the Authorizer should report
|
||||
an error.
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
required:
|
||||
- clusterRoleName
|
||||
- subjects
|
||||
type: object
|
||||
type: array
|
||||
containerRegistries:
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
externalServiceIPs:
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
pattern: ^([0-9]{1,3}.){3}[0-9]{1,3}(/([0-9]|[1-2][0-9]|3[0-2]))?$
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- allowed
|
||||
type: object
|
||||
ingressClasses:
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
type: array
|
||||
allowedRegex:
|
||||
nullable: true
|
||||
type: string
|
||||
required:
|
||||
- allowed
|
||||
- allowedRegex
|
||||
type: object
|
||||
ingressHostnames:
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
allowedRegex:
|
||||
type: string
|
||||
type: object
|
||||
limitRanges:
|
||||
items:
|
||||
@@ -151,6 +222,7 @@ spec:
|
||||
type: object
|
||||
type: array
|
||||
namespaceQuota:
|
||||
format: int32
|
||||
minimum: 1
|
||||
type: integer
|
||||
namespacesMetadata:
|
||||
@@ -158,16 +230,11 @@ spec:
|
||||
additionalAnnotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
nullable: true
|
||||
type: object
|
||||
additionalLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
nullable: true
|
||||
type: object
|
||||
required:
|
||||
- additionalAnnotations
|
||||
- additionalLabels
|
||||
type: object
|
||||
networkPolicies:
|
||||
items:
|
||||
@@ -714,55 +781,33 @@ spec:
|
||||
additionalAnnotations:
|
||||
additionalProperties:
|
||||
type: string
|
||||
nullable: true
|
||||
type: object
|
||||
additionalLabels:
|
||||
additionalProperties:
|
||||
type: string
|
||||
nullable: true
|
||||
type: object
|
||||
required:
|
||||
- additionalAnnotations
|
||||
- additionalLabels
|
||||
type: object
|
||||
storageClasses:
|
||||
properties:
|
||||
allowed:
|
||||
items:
|
||||
type: string
|
||||
nullable: true
|
||||
type: array
|
||||
allowedRegex:
|
||||
nullable: true
|
||||
type: string
|
||||
required:
|
||||
- allowed
|
||||
- allowedRegex
|
||||
type: object
|
||||
required:
|
||||
- ingressClasses
|
||||
- limitRanges
|
||||
- namespaceQuota
|
||||
- owner
|
||||
- storageClasses
|
||||
type: object
|
||||
status:
|
||||
description: TenantStatus defines the observed state of Tenant
|
||||
properties:
|
||||
groups:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
namespaces:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
size:
|
||||
type: integer
|
||||
users:
|
||||
items:
|
||||
type: string
|
||||
type: array
|
||||
required:
|
||||
- size
|
||||
type: object
|
||||
|
||||
@@ -3,5 +3,6 @@ resources:
|
||||
apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
images:
|
||||
- name: quay.io/clastix/capsule
|
||||
newTag: 0.0.1
|
||||
- name: controller
|
||||
newName: quay.io/clastix/capsule
|
||||
newTag: v0.0.4
|
||||
|
||||
@@ -34,7 +34,7 @@ spec:
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
image: quay.io/clastix/capsule:latest
|
||||
image: controller
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: manager
|
||||
resources:
|
||||
|
||||
@@ -7,3 +7,9 @@ resources:
|
||||
- auth_proxy_role.yaml
|
||||
- auth_proxy_role_binding.yaml
|
||||
- auth_proxy_client_clusterrole.yaml
|
||||
# Uncomment the following 3 lines if you are running Capsule
|
||||
# in a cluster where [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/)
|
||||
# are enabled.
|
||||
# - psp_policy.yaml
|
||||
# - psp_role.yaml
|
||||
# - psp_role_binding.yaml
|
||||
|
||||
18
config/rbac/psp_policy.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
kind: PodSecurityPolicy
|
||||
apiVersion: policy/v1beta1
|
||||
metadata:
|
||||
name: capsule
|
||||
spec:
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
hostPorts:
|
||||
- max: 0
|
||||
min: 0
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: RunAsAny
|
||||
volumes:
|
||||
- secret
|
||||
9
config/rbac/psp_role.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: capsule-psp
|
||||
rules:
|
||||
- apiGroups: ['extensions']
|
||||
resources: ['podsecuritypolicies']
|
||||
resourceNames: ['capsule-psp']
|
||||
verbs: ['use']
|
||||
12
config/rbac/psp_role_binding.yaml
Normal file
@@ -0,0 +1,12 @@
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: capsule-use-psp
|
||||
namespace: system
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: capsule-psp
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: default
|
||||
@@ -4,6 +4,11 @@ kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
ingressHostnames:
|
||||
allowed:
|
||||
- my.oil.acmecorp.com
|
||||
- my.gas.acmecorp.com
|
||||
allowedRegex: "^.*acmecorp.com$"
|
||||
ingressClasses:
|
||||
allowed:
|
||||
- default
|
||||
@@ -90,3 +95,7 @@ spec:
|
||||
allowed:
|
||||
- default
|
||||
allowedRegex: ""
|
||||
containerRegistries:
|
||||
allowed:
|
||||
- docker.io
|
||||
allowedRegex: ""
|
||||
|
||||
0
config/samples/ingress.yaml
Normal file
@@ -2,5 +2,13 @@ resources:
|
||||
- manifests.yaml
|
||||
- service.yaml
|
||||
|
||||
patchesJson6902:
|
||||
- target:
|
||||
group: admissionregistration.k8s.io
|
||||
kind: ValidatingWebhookConfiguration
|
||||
name: validating-webhook-configuration
|
||||
version: v1beta1
|
||||
path: patch_ns_selector.yaml
|
||||
|
||||
configurations:
|
||||
- kustomizeconfig.yaml
|
||||
|
||||
@@ -23,6 +23,7 @@ webhooks:
|
||||
- CREATE
|
||||
resources:
|
||||
- namespaces
|
||||
|
||||
---
|
||||
apiVersion: admissionregistration.k8s.io/v1beta1
|
||||
kind: ValidatingWebhookConfiguration
|
||||
@@ -120,6 +121,41 @@ webhooks:
|
||||
- CREATE
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
- clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: webhook-service
|
||||
namespace: system
|
||||
path: /validating-v1-registry
|
||||
failurePolicy: Ignore
|
||||
name: pod.capsule.clastix.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
resources:
|
||||
- pods
|
||||
- clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
name: webhook-service
|
||||
namespace: system
|
||||
path: /validating-external-service-ips
|
||||
failurePolicy: Fail
|
||||
name: validating-external-service-ips.capsule.clastix.io
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
apiVersions:
|
||||
- v1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- services
|
||||
- clientConfig:
|
||||
caBundle: Cg==
|
||||
service:
|
||||
@@ -135,6 +171,7 @@ webhooks:
|
||||
- v1alpha1
|
||||
operations:
|
||||
- CREATE
|
||||
- UPDATE
|
||||
resources:
|
||||
- tenants
|
||||
- clientConfig:
|
||||
|
||||
36
config/webhook/patch_ns_selector.yaml
Normal file
@@ -0,0 +1,36 @@
|
||||
- op: add
|
||||
path: /webhooks/0/namespaceSelector
|
||||
value:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
- op: add
|
||||
path: /webhooks/1/namespaceSelector
|
||||
value:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
- op: add
|
||||
path: /webhooks/3/namespaceSelector
|
||||
value:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
- op: add
|
||||
path: /webhooks/4/namespaceSelector
|
||||
value:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
- op: add
|
||||
path: /webhooks/5/namespaceSelector
|
||||
value:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
- op: add
|
||||
path: /webhooks/6/namespaceSelector
|
||||
value:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/tenant
|
||||
operator: Exists
|
||||
@@ -54,7 +54,6 @@ func (r *Manager) SetupWithManager(mgr ctrl.Manager) (err error) {
|
||||
crErr := ctrl.NewControllerManagedBy(mgr).
|
||||
For(&rbacv1.ClusterRole{}, builder.WithPredicates(predicate.Funcs{
|
||||
CreateFunc: func(event event.CreateEvent) bool {
|
||||
|
||||
return r.filterByClusterRolesNames(event.Object.GetName())
|
||||
},
|
||||
DeleteFunc: func(deleteEvent event.DeleteEvent) bool {
|
||||
@@ -153,7 +152,7 @@ func (r *Manager) EnsureClusterRoleBinding() (err error) {
|
||||
func (r *Manager) EnsureClusterRole(roleName string) (err error) {
|
||||
role, ok := clusterRoles[roleName]
|
||||
if !ok {
|
||||
return fmt.Errorf("ClusterRole %s is not mapped", roleName)
|
||||
return fmt.Errorf("clusterRole %s is not mapped", roleName)
|
||||
}
|
||||
clusterRole := &rbacv1.ClusterRole{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
|
||||
@@ -20,10 +20,10 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"golang.org/x/sync/errgroup"
|
||||
v1 "k8s.io/api/admissionregistration/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@@ -37,25 +37,22 @@ import (
|
||||
"github.com/clastix/capsule/pkg/cert"
|
||||
)
|
||||
|
||||
type CaReconciler struct {
|
||||
type CAReconciler struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Scheme *runtime.Scheme
|
||||
Namespace string
|
||||
}
|
||||
|
||||
func (r *CaReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
func (r *CAReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&corev1.Secret{}, forOptionPerInstanceName(caSecretName)).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r CaReconciler) UpdateValidatingWebhookConfiguration(wg *sync.WaitGroup, ch chan error, caBundle []byte) {
|
||||
defer wg.Done()
|
||||
|
||||
var err error
|
||||
|
||||
ch <- retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
//nolint:dupl
|
||||
func (r CAReconciler) UpdateValidatingWebhookConfiguration(caBundle []byte) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
|
||||
vw := &v1.ValidatingWebhookConfiguration{}
|
||||
err = r.Get(context.TODO(), types.NamespacedName{Name: "capsule-validating-webhook-configuration"}, vw)
|
||||
if err != nil {
|
||||
@@ -72,12 +69,9 @@ func (r CaReconciler) UpdateValidatingWebhookConfiguration(wg *sync.WaitGroup, c
|
||||
})
|
||||
}
|
||||
|
||||
func (r CaReconciler) UpdateMutatingWebhookConfiguration(wg *sync.WaitGroup, ch chan error, caBundle []byte) {
|
||||
defer wg.Done()
|
||||
|
||||
var err error
|
||||
|
||||
ch <- retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
//nolint:dupl
|
||||
func (r CAReconciler) UpdateMutatingWebhookConfiguration(caBundle []byte) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
|
||||
mw := &v1.MutatingWebhookConfiguration{}
|
||||
err = r.Get(context.TODO(), types.NamespacedName{Name: "capsule-mutating-webhook-configuration"}, mw)
|
||||
if err != nil {
|
||||
@@ -94,7 +88,7 @@ func (r CaReconciler) UpdateMutatingWebhookConfiguration(wg *sync.WaitGroup, ch
|
||||
})
|
||||
}
|
||||
|
||||
func (r CaReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
|
||||
func (r CAReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
|
||||
var err error
|
||||
|
||||
r.Log = r.Log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
|
||||
@@ -108,7 +102,7 @@ func (r CaReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
var ca cert.Ca
|
||||
var ca cert.CA
|
||||
var rq time.Duration
|
||||
ca, err = getCertificateAuthority(r.Client, r.Namespace)
|
||||
if err != nil && errors.Is(err, MissingCaError{}) {
|
||||
@@ -131,28 +125,24 @@ func (r CaReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl
|
||||
|
||||
var crt *bytes.Buffer
|
||||
var key *bytes.Buffer
|
||||
crt, _ = ca.CaCertificatePem()
|
||||
key, _ = ca.CaPrivateKeyPem()
|
||||
crt, _ = ca.CACertificatePem()
|
||||
key, _ = ca.CAPrivateKeyPem()
|
||||
|
||||
instance.Data = map[string][]byte{
|
||||
certSecretKey: crt.Bytes(),
|
||||
privateKeySecretKey: key.Bytes(),
|
||||
}
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(2)
|
||||
ch := make(chan error, 2)
|
||||
group := errgroup.Group{}
|
||||
group.Go(func() error {
|
||||
return r.UpdateMutatingWebhookConfiguration(crt.Bytes())
|
||||
})
|
||||
group.Go(func() error {
|
||||
return r.UpdateValidatingWebhookConfiguration(crt.Bytes())
|
||||
})
|
||||
|
||||
go r.UpdateMutatingWebhookConfiguration(wg, ch, crt.Bytes())
|
||||
go r.UpdateValidatingWebhookConfiguration(wg, ch, crt.Bytes())
|
||||
|
||||
wg.Wait()
|
||||
close(ch)
|
||||
|
||||
for err = range ch {
|
||||
if err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
if err = group.Wait(); err != nil {
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@ import (
|
||||
"github.com/clastix/capsule/pkg/cert"
|
||||
)
|
||||
|
||||
func getCertificateAuthority(client client.Client, namespace string) (ca cert.Ca, err error) {
|
||||
func getCertificateAuthority(client client.Client, namespace string) (ca cert.CA, err error) {
|
||||
instance := &corev1.Secret{}
|
||||
|
||||
err = client.Get(context.TODO(), types.NamespacedName{
|
||||
|
||||
@@ -17,6 +17,7 @@ limitations under the License.
|
||||
package secret
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
@@ -34,20 +35,20 @@ import (
|
||||
"github.com/clastix/capsule/pkg/cert"
|
||||
)
|
||||
|
||||
type TlsReconciler struct {
|
||||
type TLSReconciler struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Scheme *runtime.Scheme
|
||||
Namespace string
|
||||
}
|
||||
|
||||
func (r *TlsReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
func (r *TLSReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&corev1.Secret{}, forOptionPerInstanceName(tlsSecretName)).
|
||||
Complete(r)
|
||||
}
|
||||
|
||||
func (r TlsReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
|
||||
func (r TLSReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
|
||||
var err error
|
||||
|
||||
r.Log = r.Log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
|
||||
@@ -61,7 +62,7 @@ func (r TlsReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctr
|
||||
return reconcile.Result{}, err
|
||||
}
|
||||
|
||||
var ca cert.Ca
|
||||
var ca cert.CA
|
||||
var rq time.Duration
|
||||
|
||||
ca, err = getCertificateAuthority(r.Client, r.Namespace)
|
||||
@@ -82,7 +83,8 @@ func (r TlsReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctr
|
||||
rq = 6 * 30 * 24 * time.Hour
|
||||
|
||||
opts := cert.NewCertOpts(time.Now().Add(rq), "capsule-webhook-service.capsule-system.svc")
|
||||
crt, key, err := ca.GenerateCertificate(opts)
|
||||
var crt, key *bytes.Buffer
|
||||
crt, key, err = ca.GenerateCertificate(opts)
|
||||
if err != nil {
|
||||
r.Log.Error(err, "Cannot generate new TLS certificate")
|
||||
return reconcile.Result{}, err
|
||||
|
||||
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package service_labels
|
||||
package servicelabels
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package service_labels
|
||||
package servicelabels
|
||||
|
||||
import (
|
||||
"github.com/go-logr/logr"
|
||||
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package service_labels
|
||||
package servicelabels
|
||||
|
||||
import (
|
||||
"github.com/go-logr/logr"
|
||||
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package service_labels
|
||||
package servicelabels
|
||||
|
||||
import "fmt"
|
||||
|
||||
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package service_labels
|
||||
package servicelabels
|
||||
|
||||
import (
|
||||
"github.com/go-logr/logr"
|
||||
@@ -19,12 +19,12 @@ package controllers
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"golang.org/x/sync/errgroup"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
@@ -76,56 +76,62 @@ func (r TenantReconciler) Reconcile(ctx context.Context, request ctrl.Request) (
|
||||
return reconcile.Result{}, nil
|
||||
}
|
||||
r.Log.Error(err, "Error reading the object")
|
||||
return reconcile.Result{}, err
|
||||
return
|
||||
}
|
||||
|
||||
// Ensuring all namespaces are collected
|
||||
r.Log.Info("Ensuring all Namespaces are collected")
|
||||
if err := r.collectNamespaces(instance); err != nil {
|
||||
if err = r.collectNamespaces(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot collect Namespace resources")
|
||||
return reconcile.Result{}, err
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Starting processing of Namespaces", "items", instance.Status.Namespaces.Len())
|
||||
if err := r.syncNamespaces(instance); err != nil {
|
||||
r.Log.Info("Starting processing of Namespaces", "items", len(instance.Status.Namespaces))
|
||||
if err = r.syncNamespaces(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespace items")
|
||||
return reconcile.Result{}, err
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Starting processing of Network Policies", "items", len(instance.Spec.NetworkPolicies))
|
||||
if err := r.syncNetworkPolicies(instance); err != nil {
|
||||
if err = r.syncNetworkPolicies(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync NetworkPolicy items")
|
||||
return reconcile.Result{}, err
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Starting processing of Node Selector")
|
||||
if err := r.ensureNodeSelector(instance); err != nil {
|
||||
if err = r.ensureNodeSelector(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespaces Node Selector items")
|
||||
return reconcile.Result{}, err
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Starting processing of Limit Ranges", "items", len(instance.Spec.LimitRanges))
|
||||
if err := r.syncLimitRanges(instance); err != nil {
|
||||
if err = r.syncLimitRanges(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync LimitRange items")
|
||||
return reconcile.Result{}, err
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Starting processing of Resource Quotas", "items", len(instance.Spec.ResourceQuota))
|
||||
if err := r.syncResourceQuotas(instance); err != nil {
|
||||
if err = r.syncResourceQuotas(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync ResourceQuota items")
|
||||
return reconcile.Result{}, err
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring PSP for owner")
|
||||
if err = r.syncAdditionalRoleBindings(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync additional Role Bindings items")
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring RoleBinding for owner")
|
||||
if err := r.ownerRoleBinding(instance); err != nil {
|
||||
if err = r.ownerRoleBinding(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync owner RoleBinding")
|
||||
return reconcile.Result{}, err
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Ensuring Namespace count")
|
||||
if err := r.ensureNamespaceCount(instance); err != nil {
|
||||
if err = r.ensureNamespaceCount(instance); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespace count")
|
||||
return reconcile.Result{}, err
|
||||
return
|
||||
}
|
||||
|
||||
r.Log.Info("Tenant reconciling completed")
|
||||
@@ -149,7 +155,8 @@ func (r *TenantReconciler) pruningResources(ns string, keys []string, obj client
|
||||
s = s.Add(*exists)
|
||||
|
||||
if len(keys) > 0 {
|
||||
notIn, err := labels.NewRequirement(capsuleLabel, selection.NotIn, keys)
|
||||
var notIn *labels.Requirement
|
||||
notIn, err = labels.NewRequirement(capsuleLabel, selection.NotIn, keys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -157,7 +164,7 @@ func (r *TenantReconciler) pruningResources(ns string, keys []string, obj client
|
||||
}
|
||||
|
||||
r.Log.Info("Pruning objects with label selector " + s.String())
|
||||
err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
return r.DeleteAllOf(context.TODO(), obj, &client.DeleteAllOfOptions{
|
||||
ListOptions: client.ListOptions{
|
||||
LabelSelector: s,
|
||||
@@ -166,56 +173,114 @@ func (r *TenantReconciler) pruningResources(ns string, keys []string, obj client
|
||||
DeleteOptions: client.DeleteOptions{},
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Serial ResourceQuota processing is expensive: using Go routines we can speed it up.
|
||||
// In case of multiple errors these are logged properly, returning a generic error since we have to repush back the
|
||||
// reconciliation loop.
|
||||
func (r *TenantReconciler) resourceQuotasUpdate(resourceName corev1.ResourceName, qt resource.Quantity, list ...corev1.ResourceQuota) (err error) {
|
||||
ch := make(chan error, len(list))
|
||||
func (r *TenantReconciler) resourceQuotasUpdate(resourceName corev1.ResourceName, actual, limit resource.Quantity, list ...corev1.ResourceQuota) error {
|
||||
g := errgroup.Group{}
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(len(list))
|
||||
|
||||
f := func(rq corev1.ResourceQuota, wg *sync.WaitGroup, ch chan error) {
|
||||
defer wg.Done()
|
||||
ch <- retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
// Retrieving from the cache the actual ResourceQuota
|
||||
for _, item := range list {
|
||||
rq := item
|
||||
g.Go(func() error {
|
||||
found := &corev1.ResourceQuota{}
|
||||
_ = r.Get(context.TODO(), types.NamespacedName{Namespace: rq.Namespace, Name: rq.Name}, found)
|
||||
// Ensuring annotation map is there to avoid uninitialized map error and
|
||||
// assigning the overall usage
|
||||
if found.Annotations == nil {
|
||||
found.Annotations = make(map[string]string)
|
||||
if err := r.Get(context.TODO(), types.NamespacedName{Namespace: rq.Namespace, Name: rq.Name}, found); err != nil {
|
||||
return err
|
||||
}
|
||||
found.Labels = rq.Labels
|
||||
found.Annotations[capsulev1alpha1.UsedQuotaFor(resourceName)] = qt.String()
|
||||
// Updating the Resource according to the qt.Cmp result
|
||||
found.Spec.Hard = rq.Spec.Hard
|
||||
return r.Update(context.TODO(), found, &client.UpdateOptions{})
|
||||
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
_, err := controllerutil.CreateOrUpdate(context.TODO(), r.Client, found, func() error {
|
||||
// Ensuring annotation map is there to avoid uninitialized map error and
|
||||
// assigning the overall usage
|
||||
if found.Annotations == nil {
|
||||
found.Annotations = make(map[string]string)
|
||||
}
|
||||
found.Labels = rq.Labels
|
||||
found.Annotations[capsulev1alpha1.UsedQuotaFor(resourceName)] = actual.String()
|
||||
found.Annotations[capsulev1alpha1.HardQuotaFor(resourceName)] = limit.String()
|
||||
// Updating the Resource according to the actual.Cmp result
|
||||
found.Spec.Hard = rq.Spec.Hard
|
||||
return nil
|
||||
})
|
||||
return err
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
for _, rq := range list {
|
||||
go f(rq, wg, ch)
|
||||
var err error
|
||||
if err = g.Wait(); err != nil {
|
||||
// We had an error and we mark the whole transaction as failed
|
||||
// to process it another time according to the Tenant controller back-off factor.
|
||||
r.Log.Error(err, "Cannot update outer ResourceQuotas", "resourceName", resourceName.String())
|
||||
err = fmt.Errorf("update of outer ResourceQuota items has failed: %s", err.Error())
|
||||
}
|
||||
wg.Wait()
|
||||
close(ch)
|
||||
|
||||
for e := range ch {
|
||||
if e != nil {
|
||||
// We had an error and we mark the whole transaction as failed
|
||||
// to process it another time acording to the Tenant controller back-off factor.
|
||||
r.Log.Error(e, "Cannot update outer ResourceQuotas", "resourceName", resourceName.String())
|
||||
err = fmt.Errorf("update of outer ResourceQuota items has failed")
|
||||
return err
|
||||
}
|
||||
|
||||
// Additional Role Bindings can be used in many ways: applying Pod Security Policies or giving
|
||||
// access to CRDs or specific API groups.
|
||||
func (r *TenantReconciler) syncAdditionalRoleBindings(tenant *capsulev1alpha1.Tenant) (err error) {
|
||||
// hashing the RoleBinding name due to DNS RFC-1123 applied to Kubernetes labels
|
||||
hash := func(value string) string {
|
||||
h := fnv.New64a()
|
||||
_, _ = h.Write([]byte(value))
|
||||
return fmt.Sprintf("%x", h.Sum64())
|
||||
}
|
||||
// getting requested Role Binding keys
|
||||
var keys []string
|
||||
for _, i := range tenant.Spec.AdditionalRoleBindings {
|
||||
keys = append(keys, hash(i.ClusterRoleName))
|
||||
}
|
||||
|
||||
var tl, ll string
|
||||
tl, err = capsulev1alpha1.GetTypeLabel(&capsulev1alpha1.Tenant{})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
ll, err = capsulev1alpha1.GetTypeLabel(&rbacv1.RoleBinding{})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
if err = r.pruningResources(ns, keys, &rbacv1.RoleBinding{}); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, i := range tenant.Spec.AdditionalRoleBindings {
|
||||
lv := hash(i.ClusterRoleName)
|
||||
rb := &rbacv1.RoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("capsule-%s-%s", tenant.Name, i.ClusterRoleName),
|
||||
Namespace: ns,
|
||||
},
|
||||
}
|
||||
var res controllerutil.OperationResult
|
||||
res, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, rb, func() error {
|
||||
rb.ObjectMeta.Labels = map[string]string{
|
||||
tl: tenant.Name,
|
||||
ll: lv,
|
||||
}
|
||||
rb.RoleRef = rbacv1.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: i.ClusterRoleName,
|
||||
}
|
||||
rb.Subjects = i.Subjects
|
||||
return controllerutil.SetControllerReference(tenant, rb, r.Scheme)
|
||||
})
|
||||
if err != nil {
|
||||
r.Log.Error(err, "Cannot sync Additional RoleBinding")
|
||||
}
|
||||
r.Log.Info(fmt.Sprintf("Additional RoleBindings sync result: %s", string(res)), "name", rb.Name, "namespace", rb.Namespace)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// We're relying on the ResourceQuota resource to represent the resource quota for the single Tenant rather than the
|
||||
@@ -320,7 +385,7 @@ func (r *TenantReconciler) syncResourceQuotas(tenant *capsulev1alpha1.Tenant) er
|
||||
default:
|
||||
// The Tenant is respecting the Hard quota:
|
||||
// restoring the default one for all the elements,
|
||||
// also for the reconciliated one.
|
||||
// also for the reconciled one.
|
||||
for i := range rql.Items {
|
||||
if rql.Items[i].Spec.Hard == nil {
|
||||
rql.Items[i].Spec.Hard = map[corev1.ResourceName]resource.Quantity{}
|
||||
@@ -329,7 +394,7 @@ func (r *TenantReconciler) syncResourceQuotas(tenant *capsulev1alpha1.Tenant) er
|
||||
}
|
||||
target.Spec = q
|
||||
}
|
||||
if err := r.resourceQuotasUpdate(rn, qt, rql.Items...); err != nil {
|
||||
if err := r.resourceQuotasUpdate(rn, qt, q.Hard[rn], rql.Items...); err != nil {
|
||||
r.Log.Error(err, "cannot proceed with outer ResourceQuota")
|
||||
return err
|
||||
}
|
||||
@@ -393,33 +458,50 @@ func (r *TenantReconciler) syncLimitRanges(tenant *capsulev1alpha1.Tenant) error
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *TenantReconciler) syncNamespace(namespace string, ingressClassesSpec capsulev1alpha1.IngressClassesSpec, storageClassesSpec capsulev1alpha1.StorageClassesSpec, nsMetadata capsulev1alpha1.AdditionalMetadata, tenantLabel string, wg *sync.WaitGroup, channel chan error) {
|
||||
defer wg.Done()
|
||||
|
||||
ns := &corev1.Namespace{}
|
||||
if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: namespace}, ns); err != nil {
|
||||
channel <- err
|
||||
}
|
||||
|
||||
channel <- retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
func (r *TenantReconciler) syncNamespace(namespace string, tnt *capsulev1alpha1.Tenant) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
|
||||
ns := &corev1.Namespace{}
|
||||
if err = r.Client.Get(context.TODO(), types.NamespacedName{Name: namespace}, ns); err != nil {
|
||||
return
|
||||
}
|
||||
a := ns.GetAnnotations()
|
||||
if a == nil {
|
||||
a = make(map[string]string)
|
||||
}
|
||||
if len(ingressClassesSpec.Allowed) > 0 {
|
||||
a[capsulev1alpha1.AvailableIngressClassesAnnotation] = strings.Join(ingressClassesSpec.Allowed, ",")
|
||||
// resetting Capsule annotations
|
||||
delete(a, capsulev1alpha1.AvailableIngressClassesAnnotation)
|
||||
delete(a, capsulev1alpha1.AvailableIngressClassesRegexpAnnotation)
|
||||
delete(a, capsulev1alpha1.AvailableStorageClassesAnnotation)
|
||||
delete(a, capsulev1alpha1.AvailableStorageClassesRegexpAnnotation)
|
||||
delete(a, capsulev1alpha1.AllowedRegistriesAnnotation)
|
||||
delete(a, capsulev1alpha1.AllowedRegistriesRegexpAnnotation)
|
||||
|
||||
if tnt.Spec.IngressClasses != nil {
|
||||
if len(tnt.Spec.IngressClasses.Exact) > 0 {
|
||||
a[capsulev1alpha1.AvailableIngressClassesAnnotation] = strings.Join(tnt.Spec.IngressClasses.Exact, ",")
|
||||
}
|
||||
if len(tnt.Spec.IngressClasses.Regex) > 0 {
|
||||
a[capsulev1alpha1.AvailableIngressClassesRegexpAnnotation] = tnt.Spec.IngressClasses.Regex
|
||||
}
|
||||
}
|
||||
if len(ingressClassesSpec.AllowedRegex) > 0 {
|
||||
a[capsulev1alpha1.AvailableIngressClassesRegexpAnnotation] = ingressClassesSpec.AllowedRegex
|
||||
if tnt.Spec.StorageClasses != nil {
|
||||
if len(tnt.Spec.StorageClasses.Exact) > 0 {
|
||||
a[capsulev1alpha1.AvailableStorageClassesAnnotation] = strings.Join(tnt.Spec.StorageClasses.Exact, ",")
|
||||
}
|
||||
if len(tnt.Spec.StorageClasses.Regex) > 0 {
|
||||
a[capsulev1alpha1.AvailableStorageClassesRegexpAnnotation] = tnt.Spec.StorageClasses.Regex
|
||||
}
|
||||
}
|
||||
if len(storageClassesSpec.Allowed) > 0 {
|
||||
a[capsulev1alpha1.AvailableStorageClassesAnnotation] = strings.Join(storageClassesSpec.Allowed, ",")
|
||||
}
|
||||
if len(storageClassesSpec.AllowedRegex) > 0 {
|
||||
a[capsulev1alpha1.AvailableStorageClassesRegexpAnnotation] = storageClassesSpec.AllowedRegex
|
||||
if tnt.Spec.ContainerRegistries != nil {
|
||||
if len(tnt.Spec.ContainerRegistries.Exact) > 0 {
|
||||
a[capsulev1alpha1.AllowedRegistriesAnnotation] = strings.Join(tnt.Spec.ContainerRegistries.Exact, ",")
|
||||
}
|
||||
if len(tnt.Spec.ContainerRegistries.Regex) > 0 {
|
||||
a[capsulev1alpha1.AllowedRegistriesRegexpAnnotation] = tnt.Spec.ContainerRegistries.Regex
|
||||
}
|
||||
}
|
||||
|
||||
if aa := nsMetadata.AdditionalAnnotations; aa != nil {
|
||||
if aa := tnt.Spec.NamespacesMetadata.AdditionalAnnotations; aa != nil {
|
||||
for k, v := range aa {
|
||||
a[k] = v
|
||||
}
|
||||
@@ -429,12 +511,9 @@ func (r *TenantReconciler) syncNamespace(namespace string, ingressClassesSpec ca
|
||||
if l == nil {
|
||||
l = make(map[string]string)
|
||||
}
|
||||
capsuleLabel, err := capsulev1alpha1.GetTypeLabel(&capsulev1alpha1.Tenant{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l[capsuleLabel] = tenantLabel
|
||||
if al := nsMetadata.AdditionalLabels; al != nil {
|
||||
capsuleLabel, _ := capsulev1alpha1.GetTypeLabel(&capsulev1alpha1.Tenant{})
|
||||
l[capsuleLabel] = tnt.GetName()
|
||||
if al := tnt.Spec.NamespacesMetadata.AdditionalLabels; al != nil {
|
||||
for k, v := range al {
|
||||
l[k] = v
|
||||
}
|
||||
@@ -449,22 +528,18 @@ func (r *TenantReconciler) syncNamespace(namespace string, ingressClassesSpec ca
|
||||
|
||||
// Ensuring all annotations are applied to each Namespace handled by the Tenant.
|
||||
func (r *TenantReconciler) syncNamespaces(tenant *capsulev1alpha1.Tenant) (err error) {
|
||||
ch := make(chan error, tenant.Status.Namespaces.Len())
|
||||
group := errgroup.Group{}
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(tenant.Status.Namespaces.Len())
|
||||
|
||||
for _, ns := range tenant.Status.Namespaces {
|
||||
go r.syncNamespace(ns, tenant.Spec.IngressClasses, tenant.Spec.StorageClasses, tenant.Spec.NamespacesMetadata, tenant.GetName(), wg, ch)
|
||||
for _, item := range tenant.Status.Namespaces {
|
||||
namespace := item
|
||||
group.Go(func() error {
|
||||
return r.syncNamespace(namespace, tenant)
|
||||
})
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(ch)
|
||||
|
||||
for e := range ch {
|
||||
if e != nil {
|
||||
err = multierror.Append(e, err)
|
||||
}
|
||||
if err = group.Wait(); err != nil {
|
||||
r.Log.Error(err, "Cannot sync Namespaces")
|
||||
err = fmt.Errorf("cannot sync Namespaces: %s", err.Error())
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -573,16 +648,7 @@ func (r *TenantReconciler) ownerRoleBinding(tenant *capsulev1alpha1.Tenant) erro
|
||||
}
|
||||
|
||||
func (r *TenantReconciler) ensureNodeSelector(tenant *capsulev1alpha1.Tenant) (err error) {
|
||||
if tenant.Spec.NodeSelector == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, namespace := range tenant.Status.Namespaces {
|
||||
selectorMap := tenant.Spec.NodeSelector
|
||||
if selectorMap == nil {
|
||||
return
|
||||
}
|
||||
|
||||
ns := &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: namespace,
|
||||
@@ -595,7 +661,7 @@ func (r *TenantReconciler) ensureNodeSelector(tenant *capsulev1alpha1.Tenant) (e
|
||||
ns.Annotations = make(map[string]string)
|
||||
}
|
||||
var selector []string
|
||||
for k, v := range selectorMap {
|
||||
for k, v := range tenant.Spec.NodeSelector {
|
||||
selector = append(selector, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
ns.Annotations["scheduler.alpha.kubernetes.io/node-selector"] = strings.Join(selector, ",")
|
||||
@@ -612,7 +678,7 @@ func (r *TenantReconciler) ensureNodeSelector(tenant *capsulev1alpha1.Tenant) (e
|
||||
|
||||
func (r *TenantReconciler) ensureNamespaceCount(tenant *capsulev1alpha1.Tenant) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
|
||||
tenant.Status.Size = uint(tenant.Status.Namespaces.Len())
|
||||
tenant.Status.Size = uint(len(tenant.Status.Namespaces))
|
||||
found := &capsulev1alpha1.Tenant{}
|
||||
if err := r.Client.Get(context.TODO(), types.NamespacedName{Name: tenant.GetName()}, found); err != nil {
|
||||
return err
|
||||
@@ -622,17 +688,19 @@ func (r *TenantReconciler) ensureNamespaceCount(tenant *capsulev1alpha1.Tenant)
|
||||
})
|
||||
}
|
||||
|
||||
func (r *TenantReconciler) collectNamespaces(tenant *capsulev1alpha1.Tenant) (err error) {
|
||||
nl := &corev1.NamespaceList{}
|
||||
err = r.Client.List(context.TODO(), nl, client.MatchingFieldsSelector{
|
||||
Selector: fields.OneTermEqualSelector(".metadata.ownerReferences[*].capsule", tenant.GetName()),
|
||||
})
|
||||
if err != nil {
|
||||
func (r *TenantReconciler) collectNamespaces(tenant *capsulev1alpha1.Tenant) error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
|
||||
nl := &corev1.NamespaceList{}
|
||||
err = r.Client.List(context.TODO(), nl, client.MatchingFieldsSelector{
|
||||
Selector: fields.OneTermEqualSelector(".metadata.ownerReferences[*].capsule", tenant.GetName()),
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, tenant.DeepCopy(), func() error {
|
||||
tenant.AssignNamespaces(nl.Items)
|
||||
return r.Client.Status().Update(context.TODO(), tenant, &client.UpdateOptions{})
|
||||
})
|
||||
return
|
||||
}
|
||||
tenant.AssignNamespaces(nl.Items)
|
||||
_, err = controllerutil.CreateOrUpdate(context.TODO(), r.Client, tenant.DeepCopy(), func() error {
|
||||
return r.Client.Status().Update(context.TODO(), tenant, &client.UpdateOptions{})
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
42
docs/index.md
Normal file
@@ -0,0 +1,42 @@
|
||||
# Capsule Documentation
|
||||
**Capsule** helps to implement a multi-tenancy and policy-based environment in your Kubernetes cluster. It has been designed as a micro-services based ecosystem with minimalist approach, leveraging only on upstream Kubernetes.
|
||||
|
||||
Currently, the Capsule ecosystem comprises the following:
|
||||
|
||||
* [Capsule Operator](./operator/overview.md)
|
||||
* [Capsule Proxy](./proxy/overview.md)
|
||||
* [Capsule Lens extension](lens-extension/overview.md) Coming soon!
|
||||
|
||||
## Documents structure
|
||||
```command
|
||||
docs
|
||||
├── index.md
|
||||
├── lens-extension
|
||||
│ └── overview.md
|
||||
├── proxy
|
||||
│ ├── overview.md
|
||||
│ ├── sidecar.md
|
||||
│ └── standalone.md
|
||||
└── operator
|
||||
├── contributing.md
|
||||
├── getting-started.md
|
||||
├── monitoring.md
|
||||
├── overview.md
|
||||
├── references.md
|
||||
└── use-cases
|
||||
├── create-namespaces.md
|
||||
├── custom-resources.md
|
||||
├── images-registries.md
|
||||
├── ingress-classes.md
|
||||
├── ingress-hostnames.md
|
||||
├── multiple-tenants.md
|
||||
├── network-policies.md
|
||||
├── nodes-pool.md
|
||||
├── onboarding.md
|
||||
├── overview.md
|
||||
├── permissions.md
|
||||
├── pod-security-policies.md
|
||||
├── resources-quota-limits.md
|
||||
├── storage-classes.md
|
||||
└── taint-namespaces.md
|
||||
```
|
||||
2
docs/lens-extension/overview.md
Normal file
@@ -0,0 +1,2 @@
|
||||
# Capsule extension for Mirantis Lens
|
||||
Coming soon.
|
||||
@@ -1,11 +1,9 @@
|
||||
# How to contribute to Capsule
|
||||
|
||||
First, thanks for your interest in Capsule, any contribution is welcome!
|
||||
|
||||
The first step is to set up your local development environment
|
||||
The first step is to set up your local development environment as stated below:
|
||||
|
||||
## Setting up the development environment
|
||||
|
||||
The following dependencies are mandatory:
|
||||
|
||||
- [Go 1.13.8](https://golang.org/dl/)
|
||||
@@ -16,7 +14,6 @@ The following dependencies are mandatory:
|
||||
- [golangci-lint](https://github.com/golangci/golangci-lint)
|
||||
|
||||
### Installing Go dependencies
|
||||
|
||||
After cloning Capsule on any folder, access it and issue the following command
|
||||
to ensure all dependencies are properly downloaded.
|
||||
|
||||
@@ -25,20 +22,17 @@ go mod download
|
||||
```
|
||||
|
||||
### Installing Operator SDK
|
||||
|
||||
Some operations, like the Docker image build process or the code-generation of
|
||||
the CRDs manifests, as well the deep copy functions, require _Operator SDK_:
|
||||
the binary has to be installed into your `PATH`.
|
||||
|
||||
### Installing Kubebuilder
|
||||
|
||||
With the latest release of OperatorSDK there's a more tightly integration with
|
||||
Kubebuilder and its opinionated testing suite: ensure to download the latest
|
||||
binaries available from the _Releases_ GitHub page and place them into the
|
||||
`/usr/local/kubebuilder/bin` folder, ensuring this is also in your `PATH`.
|
||||
|
||||
### Installing KinD
|
||||
|
||||
Capsule can run on any certified Kubernetes installation and locally
|
||||
the whole development is performed on _KinD_, also knows as
|
||||
[Kubernetes in Docker](https://github.com/kubernetes-sigs/kind).
|
||||
@@ -72,75 +66,21 @@ The current `KUBECONFIG` will be populated with the `cluster-admin`
|
||||
certificates and the context changed to the just born Kubernetes cluster.
|
||||
|
||||
### Build the Docker image and push it to KinD
|
||||
|
||||
From the root path, issue the _make_ recipe:
|
||||
|
||||
```
|
||||
# make docker-build
|
||||
/home/prometherion/go/bin/controller-gen object:headerFile="hack/boilerplate.go.txt" paths="./..."
|
||||
go fmt ./...
|
||||
main.go
|
||||
go vet ./...
|
||||
/home/prometherion/go/bin/controller-gen "crd:trivialVersions=true" rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
|
||||
go test ./... -coverprofile cover.out
|
||||
...
|
||||
docker build . -t quay.io/clastix/capsule:latest
|
||||
Sending build context to Docker daemon 43.21MB
|
||||
Step 1/15 : FROM golang:1.13 as builder
|
||||
---> 67d10cb69049
|
||||
Step 2/15 : WORKDIR /workspace
|
||||
---> Using cache
|
||||
---> d783cc2b7c33
|
||||
Step 3/15 : COPY go.mod go.mod
|
||||
---> Using cache
|
||||
---> 0fec3ca39e50
|
||||
Step 4/15 : COPY go.sum go.sum
|
||||
---> Using cache
|
||||
---> de15be20dbe7
|
||||
Step 5/15 : RUN go mod download
|
||||
---> Using cache
|
||||
---> b525cd9abc67
|
||||
Step 6/15 : COPY main.go main.go
|
||||
---> 67d9d6538ffc
|
||||
Step 7/15 : COPY api/ api/
|
||||
---> 6243b250d170
|
||||
Step 8/15 : COPY controllers/ controllers/
|
||||
---> 4abf8ce85484
|
||||
Step 9/15 : COPY pkg/ pkg/
|
||||
---> 2cd289b1d496
|
||||
Step 10/15 : RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go
|
||||
---> Running in dac9a1e3b23f
|
||||
Removing intermediate container dac9a1e3b23f
|
||||
---> bb650a8efcb2
|
||||
Step 11/15 : FROM gcr.io/distroless/static:nonroot
|
||||
---> 131713291b92
|
||||
Step 12/15 : WORKDIR /
|
||||
---> Using cache
|
||||
---> 677a73ab94d3
|
||||
Step 13/15 : COPY --from=builder /workspace/manager .
|
||||
---> 6ecb58a82c0a
|
||||
Step 14/15 : USER nonroot:nonroot
|
||||
---> Running in a0b8c95f85d4
|
||||
Removing intermediate container a0b8c95f85d4
|
||||
---> c4897d60a094
|
||||
Step 15/15 : ENTRYPOINT ["/manager"]
|
||||
---> Running in 1a42bab52aa7
|
||||
Removing intermediate container 1a42bab52aa7
|
||||
---> 37d2adbe2669
|
||||
Successfully built 37d2adbe2669
|
||||
Successfully tagged quay.io/clastix/capsule:latest
|
||||
```
|
||||
|
||||
The image `quay.io/clastix/capsule:latest` will be available locally, you just
|
||||
need to push it to _KinD_ with the following command.
|
||||
The image `quay.io/clastix/capsule:<tag>` will be available locally. Built image `<tag>` is resulting last one available [release](https://github.com/clastix/capsule/releases).
|
||||
|
||||
Push it to _KinD_ with the following command:
|
||||
|
||||
```
|
||||
# kind load docker-image --nodes capsule-control-plane --name capsule quay.io/clastix/capsule:latest
|
||||
Image: "quay.io/clastix/capsule:latest" with ID "sha256:ebb8f640dda129a795ddc68bad125cb50af6bfb8803be210b56314ded6355759" not yet present on node "capsule-control-plane", loading...
|
||||
# kind load docker-image --nodes capsule-control-plane --name capsule quay.io/clastix/capsule:<tag>
|
||||
```
|
||||
|
||||
### Deploy the Kubernetes manifests
|
||||
|
||||
With the current `kind-capsule` context enabled, deploy all the required
|
||||
manifests issuing the following command:
|
||||
|
||||
@@ -154,21 +94,6 @@ You can check if Capsule is running tailing the logs:
|
||||
|
||||
```
|
||||
# kubectl -n capsule-system logs --all-containers -f -l control-plane=controller-manager
|
||||
...
|
||||
2020-08-03T15:37:44.031Z INFO controllers.Tenant Role Binding sync result: unchanged {"Request.Name": "oil", "name": "namespace-deleter", "namespace": "oil-dev"}
|
||||
2020-08-03T15:37:44.032Z INFO controllers.Tenant Role Binding sync result: unchanged {"Request.Name": "oil", "name": "namespace:admin", "namespace": "oil-production"}
|
||||
2020-08-03T15:37:44.032Z INFO controllers.Tenant Role Binding sync result: unchanged {"Request.Name": "oil", "name": "namespace-deleter", "namespace": "oil-production"}
|
||||
2020-08-03T15:37:44.032Z INFO controllers.Tenant Tenant reconciling completed {"Request.Name": "oil"}
|
||||
2020-08-03T15:37:44.032Z DEBUG controller-runtime.controller Successfully Reconciled {"controller": "tenant", "request": "/oil"}
|
||||
2020-08-03T15:37:46.945Z INFO controllers.Namespace Reconciling Namespace {"Request.Name": "oil-staging"}
|
||||
2020-08-03T15:37:46.953Z INFO controllers.Namespace Namespace reconciliation processed {"Request.Name": "oil-staging"}
|
||||
2020-08-03T15:37:46.953Z DEBUG controller-runtime.controller Successfully Reconciled {"controller": "namespace", "request": "/oil-staging"}
|
||||
2020-08-03T15:37:46.957Z INFO controllers.Namespace Reconciling Namespace {"Request.Name": "oil-staging"}
|
||||
2020-08-03T15:37:46.957Z DEBUG controller-runtime.controller Successfully Reconciled {"controller": "namespace", "request": "/oil-staging"}
|
||||
I0803 15:16:01.763606 1 main.go:186] Valid token audiences:
|
||||
I0803 15:16:01.763689 1 main.go:232] Generating self signed cert as no cert is provided
|
||||
I0803 15:16:02.042022 1 main.go:281] Starting TCP socket on 0.0.0.0:8443
|
||||
I0803 15:16:02.042364 1 main.go:288] Listening securely on 0.0.0.0:8443
|
||||
```
|
||||
|
||||
Since Capsule is built using _OperatorSDK_, logging is handled by the zap
|
||||
@@ -185,12 +110,10 @@ it is suggested to use the `--zap-devel` flag to get also stack traces.
|
||||
> application to serve properly HTTPS requests.
|
||||
|
||||
### Run Capsule locally
|
||||
|
||||
Debugging remote applications is always struggling but Operators just need
|
||||
access to the Kubernetes API Server.
|
||||
|
||||
#### Scaling down the remote Pod
|
||||
|
||||
First, ensure the Capsule pod is not running scaling down the Deployment.
|
||||
|
||||
```
|
||||
@@ -201,7 +124,6 @@ deployment.apps/capsule-controller-manager scaled
|
||||
> This is mandatory since Capsule uses Leader Election
|
||||
|
||||
#### Providing TLS certificate for webhooks
|
||||
|
||||
Next step is to replicate the same environment Capsule is expecting in the Pod,
|
||||
it means creating a fake certificate to handle HTTP requests.
|
||||
|
||||
@@ -217,7 +139,6 @@ kubectl -n capsule-system get secret capsule-tls -o jsonpath='{.data.tls\.key}'
|
||||
> to provide a self-signed certificate in the said directory.
|
||||
|
||||
#### Starting NGROK
|
||||
|
||||
In another session, we need a `ngrok` session, mandatory to debug also webhooks
|
||||
(YMMV).
|
||||
|
||||
@@ -241,7 +162,6 @@ since we're going to use this default URL as the `url` parameter for the
|
||||
_Dynamic Admissions Control Webhooks_.
|
||||
|
||||
#### Patching the MutatingWebhookConfiguration
|
||||
|
||||
Now it's time to patch the _MutatingWebhookConfiguration_ and the
|
||||
_ValidatingWebhookConfiguration_ too, adding the said `ngrok` URL as base for
|
||||
each defined webhook, as following:
|
||||
@@ -270,7 +190,6 @@ webhooks:
|
||||
```
|
||||
|
||||
#### Run Capsule
|
||||
|
||||
Finally, it's time to run locally Capsule using your preferred IDE (or not):
|
||||
from the project root path, you can issue the following command.
|
||||
|
||||
@@ -282,21 +201,20 @@ All the logs will start to flow in your standard output, feel free to attach
|
||||
your debugger to set breakpoints as well!
|
||||
|
||||
## Code convention
|
||||
|
||||
The changes must follow the Pull Request method where a _GitHub Action_ will
|
||||
check the `golangci-lint`, so ensure your changes respect the coding standard.
|
||||
|
||||
### golint
|
||||
|
||||
You can easily check them issuing the _Make_ recipe `golint`.
|
||||
|
||||
```
|
||||
# make golint
|
||||
golangci-lint run
|
||||
golangci-lint run -c .golangci.yml
|
||||
```
|
||||
|
||||
### goimports
|
||||
> Enabled linters and related options are defined in the [.golanci.yml file](../../.golangci.yml)
|
||||
|
||||
### goimports
|
||||
Also, the Go import statements must be sorted following the best practice:
|
||||
|
||||
```
|
||||
@@ -315,7 +233,6 @@ goimports -w -l -local "github.com/clastix/capsule" .
|
||||
```
|
||||
|
||||
### Commits
|
||||
|
||||
All the Pull Requests must refer to an already open issue: this is the first phase to contribute also for informing maintainers about the issue.
|
||||
|
||||
Commit's first line should not exceed 50 columns.
|
||||
123
docs/operator/getting-started.md
Normal file
@@ -0,0 +1,123 @@
|
||||
# Getting started
|
||||
Thanks for giving Capsule a try.
|
||||
|
||||
## Installation
|
||||
Make sure you have access to a Kubernetes cluster as administrator.
|
||||
|
||||
There are two ways to install Capsule:
|
||||
|
||||
* Use the Helm Chart available [here](https://github.com/clastix/capsule/tree/master/charts/capsule)
|
||||
* Use [`kustomize`](https://github.com/kubernetes-sigs/kustomize)
|
||||
|
||||
### Install with kustomize
|
||||
Ensure you have `kubectl` and `kustomize` installed in your `PATH`.
|
||||
|
||||
Clone this repository and move to the repo folder:
|
||||
|
||||
```
|
||||
$ git clone https://github.com/clastix/capsule
|
||||
$ cd capsule
|
||||
$ make deploy
|
||||
```
|
||||
|
||||
It will install the Capsule controller in a dedicated namespace `capsule-system`.
|
||||
|
||||
# Create your first Tenant
|
||||
In Capsule, a _Tenant_ is an abstraction to group together multiple namespaces in a single entity within a set of boundaries defined by the Cluster Administrator. The tenant is then assigned to a user or group of users, called the _Tenant Owner_.
|
||||
|
||||
Capsule defines a Tenant as Custom Resource with cluster scope:
|
||||
|
||||
```yaml
|
||||
cat <<EOF > oil_tenant.yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
namespaceQuota: 3
|
||||
EOF
|
||||
```
|
||||
|
||||
Apply as cluster admin:
|
||||
|
||||
```
|
||||
$ kubectl apply -f oil_tenant.yaml
|
||||
tenant.capsule.clastix.io/oil created
|
||||
```
|
||||
|
||||
You can check the tenant just created as cluster admin
|
||||
|
||||
```
|
||||
$ kubectl get tenants
|
||||
NAME NAMESPACE QUOTA NAMESPACE COUNT OWNER NAME OWNER KIND NODE SELECTOR AGE
|
||||
oil 3 0 alice User 1m
|
||||
```
|
||||
|
||||
## Tenant owners
|
||||
Each tenant comes with a delegated user or group of users acting as the tenant admin. In the Capsule jargon, this is called the _Tenant Owner_. Other users can operate inside a tenant with different levels of permissions and authorizations assigned directly by the Tenant Owner.
|
||||
|
||||
Capsule does not care about the authentication strategy used in the cluster and all the Kubernetes methods of [authentication](https://kubernetes.io/docs/reference/access-authn-authz/authentication/) are supported. The only requirement to use Capsule is to assign tenant users to the group defined by `--capsule-user-group` option, which defaults to `capsule.clastix.io`.
|
||||
|
||||
Assignment to a group depends on the authentication strategy in your cluster.
|
||||
|
||||
For example, if you are using `capsule.clastix.io`, users authenticated through a _X.509_ certificate must have `capsule.clastix.io` as _Organization_: `-subj "/CN=${USER}/O=capsule.clastix.io"`
|
||||
|
||||
Users authenticated through an _OIDC token_ must have
|
||||
|
||||
```json
|
||||
...
|
||||
"users_groups": [
|
||||
"capsule.clastix.io",
|
||||
"other_group"
|
||||
]
|
||||
```
|
||||
|
||||
in their token.
|
||||
|
||||
The [hack/create-user.sh](hack/create-user.sh) can help you set up a dummy `kubeconfig` for the `alice` user acting as owner of a tenant called `oil`
|
||||
|
||||
```bash
|
||||
./hack/create-user.sh alice oil
|
||||
creating certs in TMPDIR /tmp/tmp.4CLgpuime3
|
||||
Generating RSA private key, 2048 bit long modulus (2 primes)
|
||||
............+++++
|
||||
........................+++++
|
||||
e is 65537 (0x010001)
|
||||
certificatesigningrequest.certificates.k8s.io/alice-oil created
|
||||
certificatesigningrequest.certificates.k8s.io/alice-oil approved
|
||||
kubeconfig file is: alice-oil.kubeconfig
|
||||
to use it as alice export KUBECONFIG=alice-oil.kubeconfig
|
||||
```
|
||||
|
||||
Log as tenant owner
|
||||
|
||||
```
|
||||
$ export KUBECONFIG=alice-oil.kubeconfig
|
||||
```
|
||||
|
||||
and create a couple of new namespaces
|
||||
|
||||
```
|
||||
$ kubectl create namespace oil-production
|
||||
$ kubectl create namespace oil-development
|
||||
```
|
||||
|
||||
As user `alice` you can operate with fully admin permissions:
|
||||
|
||||
```
|
||||
$ kubectl -n oil-development run nginx --image=docker.io/nginx
|
||||
$ kubectl -n oil-development get pods
|
||||
```
|
||||
|
||||
but limited to only your own namespaces:
|
||||
|
||||
```
|
||||
$ kubectl -n kube-system get pods
|
||||
Error from server (Forbidden): pods is forbidden: User "alice" cannot list resource "pods" in API group "" in the namespace "kube-system"
|
||||
```
|
||||
|
||||
# What’s next
|
||||
The Tenant Owners have full administrative permissions limited to only the namespaces in the assigned tenant. However, their permissions can be controlled by the Cluster Admin by setting rules and policies on the assigned tenant. See the [use cases](./use-cases/overview.md) page for more cool things you can do with Capsule.
|
||||
2
docs/operator/monitoring.md
Normal file
@@ -0,0 +1,2 @@
|
||||
# Monitoring Capsule
|
||||
Coming soon.
|
||||
41
docs/operator/overview.md
Normal file
@@ -0,0 +1,41 @@
|
||||
# Kubernetes multi-tenancy made simple
|
||||
**Capsule** helps to implement a multi-tenancy and policy-based environment in your Kubernetes cluster. It is not intended to be yet another _PaaS_, instead, it has been designed as a micro-services based ecosystem with minimalist approach, leveraging only on upstream Kubernetes.
|
||||
|
||||
# What's the problem with the current status?
|
||||
Kubernetes introduces the _Namespace_ object type to create logical partitions of the cluster as isolated *slices*. However, implementing advanced multi-tenancy scenarios soon becomes complicated because of the flat structure of Kubernetes namespaces and the impossibility to share resources among namespaces belonging to the same tenant. To overcome this, cluster admins tend to provision a dedicated cluster for each group of users, teams, or departments. As an organization grows, the number of clusters to manage and keep aligned becomes an operational nightmare, described as the well-known phenomenon of _clusters sprawl_.
|
||||
|
||||
# Entering Capsule
|
||||
Capsule takes a different approach. In a single cluster, it aggregates multiple namespaces in a lightweight abstraction called _Tenant_. Within each tenant, users are free to create their namespaces and share all the assigned resources while a Policy Engine keeps different tenants isolated from each other. The _Network and Security Policies_, _Resource Quota_, _Limit Ranges_, _RBAC_, and other policies defined at the tenant level are automatically inherited by all the namespaces in the tenant. And users are free to operate their tenants in autonomy, without the intervention of the cluster administrator.
|
||||
|
||||
# Features
|
||||
## Self-Service
|
||||
Leave to developers the freedom to self-provision their cluster resources according to the assigned boundaries.
|
||||
|
||||
## Preventing Clusters Sprawl
|
||||
Share a single cluster with multiple teams, groups of users, or departments by saving operational and management efforts.
|
||||
|
||||
## Governance
|
||||
Leverage Kubernetes Admission Controllers to enforce the industry security best practices and meet legal requirements.
|
||||
|
||||
## Resources Control
|
||||
Take control of the resources consumed by users while preventing them from overtaking.
|
||||
|
||||
## Native Experience
|
||||
Provide multi-tenancy with a native Kubernetes experience without introducing additional management layers, plugins, or customised binaries.
|
||||
|
||||
## GitOps ready
|
||||
Capsule is completely declarative and GitOps ready.
|
||||
|
||||
## Bring your own device (BYOD)
|
||||
Assign to tenants a dedicated set of compute, storage, and network resources and avoid the noisy neighbors' effect.
|
||||
|
||||
# Common use cases for Capsule
|
||||
Please, refer to the corresponding [section](./use-cases/overview.md) in the project documentation for a detailed list of common use cases that Capsule can address.
|
||||
|
||||
# What’s next
|
||||
Have fun with Capsule:
|
||||
|
||||
* [Getting Started](./getting-started.md)
|
||||
* [Use Cases](./use-cases/overview.md)
|
||||
* [Contributing](./contributing.md)
|
||||
* [References](./references.md)
|
||||
697
docs/operator/references.md
Normal file
@@ -0,0 +1,697 @@
|
||||
# Reference
|
||||
|
||||
* [Custom Resource Definition](#custom-resource-definition)
|
||||
* [Metadata](#metadata)
|
||||
* [name](#name)
|
||||
* [Spec](#spec)
|
||||
* [owner](#owner)
|
||||
* [nodeSelector](#nodeSelector)
|
||||
* [namespaceQuota](#namespaceQuota)
|
||||
* [namespacesMetadata](#namespacesMetadata)
|
||||
* [servicesMetadata](#servicesMetadata)
|
||||
* [ingressClasses](#ingressClasses)
|
||||
* [ingressHostnames](#ingressHostnames)
|
||||
* [storageClasses](#storageClasses)
|
||||
* [containerRegistries](#containerRegistries)
|
||||
* [additionalRoleBindings](#additionalRoleBindings)
|
||||
* [resourceQuotas](#resourceQuotas)
|
||||
* [limitRanges](#limitRanges)
|
||||
* [networkPolicies](#networkPolicies)
|
||||
* [externalServiceIPs](#externalServiceIPs)
|
||||
* [Status](#status)
|
||||
* [size](#size)
|
||||
* [namespaces](#namespaces)
|
||||
* [Role Based Access Control](#role-based-access-control)
|
||||
* [Admission Controllers](#admission-controller)
|
||||
* [Command Options](#command-options)
|
||||
* [Created Resources](#created-resources)
|
||||
|
||||
|
||||
## Custom Resource Definition
|
||||
Capsule operator uses a single Custom Resource Definition (CRD) for _Tenants_. Please, see the [Tenant Custom Resource Definition](https://github.com/clastix/capsule/blob/master/config/crd/bases/capsule.clastix.io_tenants.yaml). In Capsule, Tenants are cluster wide resources. You need cluster-level permissions to work with tenants.
|
||||
|
||||
### Metadata
|
||||
#### name
|
||||
Metadata `name` can contain any valid symbol from the regex: `[a-z0-9]([-a-z0-9]*[a-z0-9])?`.
|
||||
|
||||
### Spec
|
||||
#### owner
|
||||
The field `owner` is the only mandatory spec in a _Tenant_ manifest. It specifies the ownership of the tenant:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
owner: # required
|
||||
name: <name>
|
||||
kind: <User|Group>
|
||||
```
|
||||
|
||||
The user and group names should be valid identities. Capsule does not care about the authentication strategy used in the cluster and all the Kubernetes methods of [Authentication](https://kubernetes.io/docs/reference/access-authn-authz/authentication/) are supported. The only requirement to use Capsule is to assign tenant users to the group defined by `--capsule-user-group` option, which defaults to `capsule.clastix.io`.
|
||||
|
||||
Assignment to a group depends on the used authentication strategy.
|
||||
|
||||
For example, if you are using `capsule.clastix.io`, users authenticated through a _X.509_ certificate must have `capsule.clastix.io` as _Organization_: `-subj "/CN=${USER}/O=capsule.clastix.io"`
|
||||
|
||||
Users authenticated through an _OIDC token_ must have
|
||||
|
||||
```json
|
||||
...
|
||||
"users_groups": [
|
||||
"capsule.clastix.io",
|
||||
"other_group"
|
||||
]
|
||||
```
|
||||
|
||||
Permissions are controlled by RBAC.
|
||||
|
||||
#### nodeSelector
|
||||
Field `nodeSelector` specifies the label to control the placement of pods on a given pool of worker nodes:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
nodeSelector:
|
||||
<key>: <value>
|
||||
```
|
||||
|
||||
All namespaces created within the tenant will have the annotation:
|
||||
|
||||
```yaml
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/node-selector: 'key=value'
|
||||
```
|
||||
|
||||
This annotation tells the Kubernetes scheduler to place pods on the nodes having that label:
|
||||
|
||||
```yaml
|
||||
kind: Pod
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: sample
|
||||
spec:
|
||||
nodeSelector:
|
||||
<key>: <value>
|
||||
```
|
||||
|
||||
> NB:
|
||||
> While Capsule just enforces the annotation `scheduler.alpha.kubernetes.io/node-selector` at namespace level,
|
||||
> the `nodeSelector` field in the pod template is under the control of the default _PodNodeSelector_ enabled
|
||||
> on the Kubernetes API server using the flag `--enable-admission-plugins=PodNodeSelector`.
|
||||
|
||||
Please, see the [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) documentation.
|
||||
|
||||
The tenant owner is not allowed to change or remove the annotation above from the namespace.
|
||||
|
||||
#### namespaceQuota
|
||||
Field `namespaceQuota` specifies the maximum number of namespaces allowed for that tenant.
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
namespaceQuota: <quota>
|
||||
```
|
||||
Once the namespace quota assigned to the tenant has been reached, the tenant owner cannot create further namespaces.
|
||||
|
||||
#### namespacesMetadata
|
||||
Field `namespacesMetadata` specifies additional labels and annotations the Capsule operator places on any _Namespace_ in the tenant.
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
namespacesMetadata:
|
||||
additionalAnnotations:
|
||||
<annotations>
|
||||
additionalLabels:
|
||||
<key>: <value>
|
||||
```
|
||||
|
||||
All namespaces in the tenant will have:
|
||||
|
||||
```yaml
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
annotations:
|
||||
<annotations>
|
||||
labels:
|
||||
<key>: <value>
|
||||
```
|
||||
|
||||
The tenant owner is not allowed to change or remove such labels and annotations from the namespace.
|
||||
|
||||
#### servicesMetadata
|
||||
Field `servicesMetadata` specifies additional labels and annotations the Capsule operator places on any _Service_ in the tenant.
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
servicesMetadata:
|
||||
additionalAnnotations:
|
||||
<annotations>
|
||||
additionalLabels:
|
||||
<key>: <value>
|
||||
```
|
||||
|
||||
All services in the tenant will have:
|
||||
|
||||
```yaml
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
annotations:
|
||||
<annotations>
|
||||
labels:
|
||||
<key>: <value>
|
||||
```
|
||||
|
||||
The tenant owner is not allowed to change or remove such labels and annotations from the _Service_.
|
||||
|
||||
#### ingressClasses
|
||||
Field `ingressClasses` specifies the _IngressClass_ assigned to the tenant.
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
ingressClasses:
|
||||
allowed:
|
||||
- <class>
|
||||
allowedRegex: <regex>
|
||||
```
|
||||
|
||||
Capsule assures that all the _Ingress_ resources created in the tenant can use only one of the allowed _IngressClass_.
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: <name>
|
||||
namespace:
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: <class>
|
||||
```
|
||||
|
||||
> NB: _Ingress_ resources are supported in both the versions, `networking.k8s.io/v1beta1` and `networking.k8s.io/v1`.
|
||||
|
||||
Allowed _IngressClasses_ are reported into namespaces as annotations, so the tenant owner can check them
|
||||
|
||||
```yaml
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
annotations:
|
||||
capsule.clastix.io/ingress-classes: <class>
|
||||
capsule.clastix.io/ingress-classes-regexp: <regex>
|
||||
```
|
||||
Any attempt by the tenant owner to use an _IngressClass_ that is not allowed will fail.
|
||||
|
||||
#### ingressHostnames
|
||||
Field `ingressHostnames` specifies the allowed hostnames in _Ingresses_ for the given tenant.
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
ingressHostnames:
|
||||
allowed:
|
||||
- <hostname>
|
||||
allowedRegex: <regex>
|
||||
```
|
||||
|
||||
Capsule assures that all _Ingress_ resources created in the tenant can use only one of the allowed hostnames.
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: <name>
|
||||
namespace:
|
||||
annotations:
|
||||
spec:
|
||||
rules:
|
||||
- host: <hostname>
|
||||
http: {}
|
||||
```
|
||||
|
||||
> NB: _Ingress_ resources are supported in both the versions, `networking.k8s.io/v1beta1` and `networking.k8s.io/v1`.
|
||||
|
||||
Any attempt by the tenant owner to use a hostname that is not allowed will fail.
|
||||
|
||||
#### storageClasses
|
||||
Field `storageClasses` specifies the _StorageClasses_ assigned to the tenant.
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
storageClasses:
|
||||
allowed:
|
||||
- <class>
|
||||
allowedRegex: <regex>
|
||||
```
|
||||
|
||||
Capsule assures that all _PersistentVolumeClaim_ resources created in the tenant can use only one of the allowed _StorageClasses_.
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: <name>
|
||||
namespace:
|
||||
spec:
|
||||
storageClassName: <class>
|
||||
```
|
||||
|
||||
Allowed _StorageClasses_ are reported into namespaces as annotations, so the tenant owner can check them
|
||||
|
||||
```yaml
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
annotations:
|
||||
capsule.clastix.io/storage-classes: <class>
|
||||
capsule.clastix.io/storage-classes-regexp: <regex>
|
||||
```
|
||||
|
||||
Any attempt by the tenant owner to use a _StorageClass_ that is not allowed will fail.
|
||||
|
||||
#### containerRegistries
|
||||
Field `containerRegistries` specifies the trusted image registries assigned to the tenant.
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
containerRegistries:
|
||||
allowed:
|
||||
- <registry>
|
||||
allowedRegex: <regex>
|
||||
```
|
||||
|
||||
Capsule assures that all _Pods_ resources created in the tenant can use only one of the allowed trusted registries.
|
||||
|
||||
Allowed registries are reported into namespaces as annotations, so the tenant owner can check them
|
||||
|
||||
```yaml
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
annotations:
|
||||
capsule.clastix.io/allowed-registries-regexp: <regex>
|
||||
capsule.clastix.io/registries: <registry>
|
||||
```
|
||||
|
||||
Any attempt by the tenant owner to use a registry that is not allowed will fail.
|
||||
|
||||
> NB:
|
||||
> In case of naked and official images hosted on Docker Hub, Capsule is going
|
||||
> to retrieve the registry even if it's not explicit: a `busybox:latest` Pod
|
||||
> running on a Tenant allowing `docker.io` will not be blocked, even if the image
|
||||
> field is not explicit as `docker.io/busybox:latest`.
|
||||
|
||||
#### additionalRoleBindings
|
||||
Field `additionalRoleBindings` specifies additional _RoleBindings_ assigned to the tenant.
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
additionalRoleBindings:
|
||||
- clusterRoleName: <ClusterRole>
|
||||
subjects:
|
||||
- kind: <Group|User|ServiceAccount>
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
name: <name>
|
||||
```
|
||||
|
||||
Capsule will ensure that all namespaces in the tenant always contain the _RoleBinding_ for the given _ClusterRole_.
|
||||
|
||||
#### resourceQuotas
|
||||
Field `resourceQuotas` specifies a list of _ResourceQuota_ resources assigned to the tenant.
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
resourceQuotas:
|
||||
- hard:
|
||||
limits.cpu: <hard_value>
|
||||
limits.memory: <hard_value>
|
||||
requests.cpu: <hard_value>
|
||||
requests.memory: <hard_value>
|
||||
```
|
||||
|
||||
Please, refer to [ResourceQuota](https://kubernetes.io/docs/concepts/policy/resource-quotas/) documentation for the subject.
|
||||
|
||||
The assigned quota are inherited by any namespace created in the tenant
|
||||
|
||||
```yaml
|
||||
kind: ResourceQuota
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: compute
|
||||
namespace:
|
||||
labels:
|
||||
capsule.clastix.io/resource-quota=0
|
||||
capsule.clastix.io/tenant=tenant
|
||||
annotations:
|
||||
# used resources in the tenant
|
||||
quota.capsule.clastix.io/used-limits.cpu=<tenant_used_value>
|
||||
quota.capsule.clastix.io/used-limits.memory=<tenant_used_value>
|
||||
quota.capsule.clastix.io/used-requests.cpu=<tenant_used_value>
|
||||
quota.capsule.clastix.io/used-requests.memory=<tenant_used_value>
|
||||
# hard quota for the tenant
|
||||
quota.capsule.clastix.io/hard-limits.cpu=<tenant_hard_value>
|
||||
quota.capsule.clastix.io/hard-limits.memory=<tenant_hard_value>
|
||||
quota.capsule.clastix.io/hard-requests.cpu=<tenant_hard_value>
|
||||
quota.capsule.clastix.io/hard-requests.memory=<tenant_hard_value>
|
||||
spec:
|
||||
hard:
|
||||
limits.cpu: <hard_value>
|
||||
limits.memory: <hard_value>
|
||||
requests.cpu: <hard_value>
|
||||
requests.memory: <hard_value>
|
||||
status:
|
||||
hard:
|
||||
limits.cpu: <namespace_hard_value>
|
||||
limits.memory: <namespace_hard_value>
|
||||
requests.cpu: <namespace_hard_value>
|
||||
requests.memory: <namespace_hard_value>
|
||||
used:
|
||||
limits.cpu: <namespace_used_value>
|
||||
limits.memory: <namespace_used_value>
|
||||
requests.cpu: <namespace_used_value>
|
||||
requests.memory: <namespace_used_value>
|
||||
```
|
||||
|
||||
The Capsule operator aggregates _ResourceQuota_ at tenant level, so that the hard quota is never crossed for the given tenant. This permits the tenant owner to consume resources in the tenant regardless of the namespace.
|
||||
|
||||
The annotations
|
||||
|
||||
```yaml
|
||||
quota.capsule.clastix.io/used-<resource>=<tenant_used_value>
|
||||
quota.capsule.clastix.io/hard-<resource>=<tenant_hard_value>
|
||||
```
|
||||
|
||||
are updated in realtime by Capsule, according to the actual aggregated usage of resources in the tenant.
|
||||
|
||||
> NB:
|
||||
> While Capsule controls quota at tenant level, at namespace level the quota enforcement
|
||||
> is under the control of the default _ResourceQuota Admission Controller_ enabled on the
|
||||
> Kubernetes API server using the flag `--enable-admission-plugins=ResourceQuota`.
|
||||
|
||||
The tenant owner is not allowed to change or remove the _ResourceQuota_ from the namespace.
|
||||
|
||||
#### limitRanges
|
||||
Field `limitRanges` specifies the _LimitRanges_ assigned to the tenant.
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
limitRanges:
|
||||
- limits:
|
||||
- type: Pod
|
||||
max:
|
||||
cpu: <value>
|
||||
memory: <value>
|
||||
min:
|
||||
cpu: <value>
|
||||
memory: <value>
|
||||
- type: Container
|
||||
default:
|
||||
cpu: <value>
|
||||
memory: <value>
|
||||
defaultRequest:
|
||||
cpu: <value>
|
||||
memory: <value>
|
||||
max:
|
||||
cpu: <value>
|
||||
memory: <value>
|
||||
min:
|
||||
cpu: <value>
|
||||
memory: <value>
|
||||
- type: PersistentVolumeClaim
|
||||
max:
|
||||
storage: <value>
|
||||
min:
|
||||
storage: <value>
|
||||
```
|
||||
|
||||
Please, refer to [LimitRange](https://kubernetes.io/docs/concepts/policy/limit-range/) documentation for the subject.
|
||||
|
||||
The assigned _LimitRanges_ are inherited by any namespace created in the tenant
|
||||
|
||||
```yaml
|
||||
kind: LimitRange
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: <name>
|
||||
namespace:
|
||||
spec:
|
||||
limits:
|
||||
- type: Pod
|
||||
max:
|
||||
cpu: <value>
|
||||
memory: <value>
|
||||
min:
|
||||
cpu: <value>
|
||||
memory: <value>
|
||||
- type: Container
|
||||
default:
|
||||
cpu: <value>
|
||||
memory: <value>
|
||||
defaultRequest:
|
||||
cpu: <value>
|
||||
memory: <value>
|
||||
max:
|
||||
cpu: <value>
|
||||
memory: <value>
|
||||
min:
|
||||
cpu: <value>
|
||||
memory: <value>
|
||||
- type: PersistentVolumeClaim
|
||||
max:
|
||||
storage: <value>
|
||||
min:
|
||||
storage: <value>
|
||||
```
|
||||
|
||||
> NB:
|
||||
> Limit ranges enforcement for a single pod, container, and persistent volume
|
||||
> claim is done by the default _LimitRanger Admission Controller_ enabled on
|
||||
> the Kubernetes API server: using the flag
|
||||
> `--enable-admission-plugins=LimitRanger`.
|
||||
|
||||
Since limit ranges apply to individual resources, there is no aggregate to count.
|
||||
|
||||
The tenant owner is not allowed to change or remove _LimitRanges_ from the namespace.
|
||||
|
||||
#### networkPolicies
|
||||
Field `networkPolicies` specifies the _NetworkPolicies_ assigned to the tenant.
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
networkPolicies:
|
||||
- policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
egress:
|
||||
- to:
|
||||
- ipBlock:
|
||||
cidr: <value>
|
||||
ingress:
|
||||
- from:
|
||||
- namespaceSelector: {}
|
||||
- podSelector: {}
|
||||
- ipBlock:
|
||||
cidr: <value>
|
||||
podSelector: {}
|
||||
```
|
||||
|
||||
Please, refer to [NetworkPolicies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) documentation for the subjects of a _NetworkPolicy_.
|
||||
|
||||
The assigned _NetworkPolicies_ are inherited by any namespace created in the tenant.
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: <name>
|
||||
namespace:
|
||||
spec:
|
||||
podSelector: {}
|
||||
ingress:
|
||||
- from:
|
||||
- namespaceSelector: {}
|
||||
- podSelector: {}
|
||||
- ipBlock:
|
||||
cidr: <value>
|
||||
egress:
|
||||
- to:
|
||||
- ipBlock:
|
||||
cidr: <value>
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
```
|
||||
|
||||
The tenant owner can create, patch and delete additional _NetworkPolicy_ to refine the assigned one. However, the tenant owner cannot delete the _NetworkPolicies_ set at tenant level.
|
||||
|
||||
#### externalServiceIPs
|
||||
Field `externalServiceIPs` specifies the external IPs that can be used in _Services_ with type `ClusterIP`.
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
externalServiceIPs:
|
||||
allowed:
|
||||
- <cidr>
|
||||
```
|
||||
|
||||
Capsule will ensure that all _Services_ in the tenant can contain only the allowed external IPs. This mitigates the [_CVE-2020-8554_] vulnerability, where a potential attacker, able to create a _Service_ with type `ClusterIP` and set the `externalIPs` field, can intercept traffic to that IP. Only the CIDRs in the allowed list can be set as the `externalIPs` field in a _Service_ with type `ClusterIP`.
|
||||
|
||||
To prevent users to set the `externalIPs` field, use an empty allowed list:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
externalServiceIPs:
|
||||
allowed: []
|
||||
```
|
||||
|
||||
> NB: Without this control, your cluster is exposed to the vulnerability [_CVE-2020-8554_].
|
||||
|
||||
### Status
|
||||
#### size
|
||||
Status field `size` reports the number of namespaces belonging to the tenant. It is reported as `NAMESPACE COUNT` in the `kubectl` output:
|
||||
|
||||
```
|
||||
$ kubectl get tnt
|
||||
NAME NAMESPACE QUOTA NAMESPACE COUNT OWNER NAME OWNER KIND NODE SELECTOR AGE
|
||||
cap 9 1 joe User {"pool":"cmp"} 5d4h
|
||||
gas 6 2 alice User {"node":"worker"} 5d4h
|
||||
oil 9 3 alice User {"pool":"cmp"} 5d4h
|
||||
sample 9 0 alice User {"key":"value"} 29h
|
||||
```
|
||||
|
||||
#### namespaces
|
||||
Status field `namespaces` reports the list of all namespaces belonging to the tenant.
|
||||
|
||||
```yaml
|
||||
...
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: tenant
|
||||
spec:
|
||||
...
|
||||
status:
|
||||
namespaces:
|
||||
oil-development
|
||||
oil-production
|
||||
oil-marketing
|
||||
size: 3
|
||||
```
|
||||
|
||||
## Role Based Access Control
|
||||
In the current implementation, the Capsule operator requires cluster admin permissions to fully operate.
|
||||
|
||||
## Admission Controllers
|
||||
Capsule implements Kubernetes multi-tenancy capabilities using a minimum set of standard [Admission Controllers](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/) enabled on the Kubernetes APIs server.
|
||||
|
||||
Here the list of required Admission Controllers you have to enable to get full support from Capsule:
|
||||
|
||||
* PodNodeSelector
|
||||
* LimitRanger
|
||||
* ResourceQuota
|
||||
* MutatingAdmissionWebhook
|
||||
* ValidatingAdmissionWebhook
|
||||
|
||||
In addition to the required controllers above, Capsule implements its own set through the [Dynamic Admission Controller](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) mechanism, providing callbacks to add further validation or resource patching.
|
||||
|
||||
To see Admission Controls installed by Capsule:
|
||||
|
||||
```
|
||||
$ kubectl get ValidatingWebhookConfiguration
|
||||
NAME WEBHOOKS AGE
|
||||
capsule-validating-webhook-configuration 8 2h
|
||||
|
||||
$ kubectl get MutatingWebhookConfiguration
|
||||
NAME WEBHOOKS AGE
|
||||
capsule-mutating-webhook-configuration 1 2h
|
||||
```
|
||||
|
||||
## Command Options
|
||||
The Capsule operator provides following command options:
|
||||
|
||||
Option | Description | Default
|
||||
--- | --- | ---
|
||||
`--metrics-addr` | The address and port where `/metrics` are exposed. | `127.0.0.1:8080`
|
||||
`--enable-leader-election` | Start a leader election client and gain leadership before executing the main loop. | `true`
|
||||
`--force-tenant-prefix` | Force the tenant name as prefix for namespaces: `<tenant_name>-<namespace>`. | `false`
|
||||
`--zap-log-level` | The log verbosity with a value from 1 to 10 or the basic keywords. | `4`
|
||||
`--zap-devel` | The flag to get the stack traces for deep debugging. | `null`
|
||||
`--capsule-user-group` | Override the Capsule group to which all tenant owners must belong. | `capsule.clastix.io`
|
||||
`--protected-namespace-regex` | Disallows creation of namespaces matching the passed regexp. | `null`
|
||||
|
||||
## Created Resources
|
||||
Once installed, the Capsule operator creates the following resources in your cluster:
|
||||
|
||||
```
|
||||
NAMESPACE RESOURCE
|
||||
customresourcedefinition.apiextensions.k8s.io/tenants.capsule.clastix.io
|
||||
clusterrole.rbac.authorization.k8s.io/capsule-proxy-role
|
||||
clusterrole.rbac.authorization.k8s.io/capsule-metrics-reader
|
||||
mutatingwebhookconfiguration.admissionregistration.k8s.io/capsule-mutating-webhook-configuration
|
||||
validatingwebhookconfiguration.admissionregistration.k8s.io/capsule-validating-webhook-configuration
|
||||
capsule-system clusterrolebinding.rbac.authorization.k8s.io/capsule-manager-rolebinding
|
||||
capsule-system clusterrolebinding.rbac.authorization.k8s.io/capsule-proxy-rolebinding
|
||||
capsule-system secret/capsule-ca
|
||||
capsule-system secret/capsule-tls
|
||||
capsule-system service/capsule-controller-manager-metrics-service
|
||||
capsule-system service/capsule-webhook-service
|
||||
capsule-system deployment.apps/capsule-controller-manager
|
||||
```
|
||||
99
docs/operator/use-cases/create-namespaces.md
Normal file
@@ -0,0 +1,99 @@
|
||||
# Create namespaces
|
||||
Alice can create a new namespace in her tenant, as simply:
|
||||
|
||||
```
|
||||
alice@caas# kubectl create ns oil-production
|
||||
```
|
||||
|
||||
> Note that Alice started the name of her namespace with an identifier of her
|
||||
> tenant: this is not a strict requirement but it is highly suggested because
|
||||
> it is likely that many different tenants would like to call their namespaces
|
||||
> as `production`, `test`, or `demo`, etc.
|
||||
>
|
||||
> The enforcement of this naming convention, however, is optional and can be controlled by the cluster administrator with the `--force-tenant-prefix` option as argument of the Capsule controller.
|
||||
|
||||
When Alice creates the namespace, the Capsule controller, listening for creation and deletion events, assigns to Alice the following roles:
|
||||
|
||||
```yaml
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: namespace:admin
|
||||
namespace: oil-production
|
||||
subjects:
|
||||
- kind: User
|
||||
name: alice
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: admin
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: namespace-deleter
|
||||
namespace: oil-production
|
||||
subjects:
|
||||
- kind: User
|
||||
name: alice
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: namespace-deleter
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
```
|
||||
|
||||
Alice is the admin of the namespaces:
|
||||
|
||||
```
|
||||
alice@caas# kubectl get rolebindings -n oil-production
|
||||
NAME ROLE AGE
|
||||
namespace:admin ClusterRole/admin 9m5s
|
||||
namespace-deleter   ClusterRole/namespace-deleter   9m5s
|
||||
```
|
||||
|
||||
The said Role Binding resources are automatically created by Capsule when Alice creates a namespace in the tenant.
|
||||
|
||||
Alice can deploy any resource in the namespace, according to the predefined
|
||||
[`admin` cluster role](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles).
|
||||
|
||||
```
|
||||
alice@caas# kubectl -n oil-development run nginx --image=docker.io/nginx
|
||||
alice@caas# kubectl -n oil-development get pods
|
||||
```
|
||||
|
||||
Alice can create additional namespaces, according to the `namespaceQuota` field of the tenant manifest:
|
||||
|
||||
```
|
||||
alice@caas# kubectl create ns oil-development
|
||||
alice@caas# kubectl create ns oil-test
|
||||
```
|
||||
|
||||
While Alice creates namespace resources, the Capsule controller updates the status of the tenant so Bill, the cluster admin, can check its status:
|
||||
|
||||
```
|
||||
bill@caas# kubectl describe tenant oil
|
||||
```
|
||||
|
||||
```yaml
|
||||
...
|
||||
status:
|
||||
namespaces:
|
||||
oil-development
|
||||
oil-production
|
||||
oil-test
|
||||
size: 3 # current namespace count
|
||||
...
|
||||
```
|
||||
|
||||
Once the namespace quota assigned to the tenant has been reached, Alice cannot create further namespaces
|
||||
|
||||
```
|
||||
alice@caas# kubectl create ns oil-training
|
||||
Error from server (Cannot exceed Namespace quota: please, reach out the system administrators): admission webhook "quota.namespace.capsule.clastix.io" denied the request.
|
||||
```
|
||||
|
||||
The enforcement on the maximum number of Namespace resources per Tenant is in charge of the Capsule controller via its Dynamic Admission Webhook capability.
|
||||
|
||||
# What’s next
|
||||
See how Alice, the tenant owner, can assign different user roles in the tenant. [Assign permissions](./permissions.md).
|
||||
91
docs/operator/use-cases/custom-resources.md
Normal file
@@ -0,0 +1,91 @@
|
||||
# Create Custom Resources
|
||||
The Capsule operator grants admin permissions to the tenant's users, but only limited to their namespaces. To achieve that, it assigns the ClusterRole [admin](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) to the tenant owner. This ClusterRole does not permit the installation of custom resources in the namespaces.
|
||||
|
||||
In order to allow the tenant owner to create Custom Resources in their namespaces, the cluster admin defines a proper Cluster Role. For example:
|
||||
|
||||
```yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: argoproj-provisioner
|
||||
rules:
|
||||
- apiGroups:
|
||||
- argoproj.io
|
||||
resources:
|
||||
- applications
|
||||
- appprojects
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- patch
|
||||
- delete
|
||||
```
|
||||
|
||||
Bill can assign this role to any namespace in the Alice's tenant by setting it in the tenant manifest:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
additionalRoleBindings:
|
||||
- clusterRoleName: 'argoproj-provisioner'
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: User
|
||||
name: alice
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: User
|
||||
name: joe
|
||||
```
|
||||
|
||||
or in case of Group type owners:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
additionalRoleBindings:
|
||||
- clusterRoleName: 'argoproj-provisioner'
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: User
|
||||
name: alice
|
||||
```
|
||||
|
||||
With the given specification, Capsule will ensure that all Alice's namespaces will contain a _RoleBinding_ for the specified _Cluster Role_. For example, in the `oil-production` namespace, Alice will see:
|
||||
|
||||
```yaml
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: capsule-oil-argoproj-provisioner
|
||||
namespace: oil-production
|
||||
subjects:
|
||||
- kind: User
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
name: alice
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: argoproj-provisioner
|
||||
```
|
||||
|
||||
With the above example, Capsule allows the tenant owner to create namespaced custom resources.
|
||||
|
||||
> Nota bene: a tenant owner having the admin scope on its namespaces only, does not have the permission to create Custom Resources Definitions (CRDs) because this requires a cluster admin permission level. Only Bill, the cluster admin, can create CRDs. This is a known limitation of any multi-tenancy environment based on a single Kubernetes cluster.
|
||||
|
||||
# What’s next
|
||||
See how Bill, the cluster admin, can set taints on the Alice's namespaces. [Taint namespaces](./taint-namespaces.md).
|
||||
63
docs/operator/use-cases/images-registries.md
Normal file
@@ -0,0 +1,63 @@
|
||||
# Assign Trusted Images Registries
|
||||
Bill, the cluster admin, can set a strict policy on the applications running into Alice's tenant: he'd like to allow running just images hosted on a list of specific container registries.
|
||||
|
||||
The `containerRegistries` spec addresses this task: it provides hard enforcement using a list of allowed values, a regular expression, or a combination of both.
|
||||
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
containerRegistries:
|
||||
allowed:
|
||||
- docker.io
|
||||
- quay.io
|
||||
allowedRegex: ''
|
||||
```
|
||||
|
||||
> In case of naked and official images hosted on Docker Hub, Capsule is going
|
||||
> to retrieve the registry even if it's not explicit: a `busybox:latest` Pod
|
||||
> running on a Tenant allowing `docker.io` will not be blocked, even if the image
|
||||
> field is not explicit as `docker.io/busybox:latest`.
|
||||
|
||||
|
||||
Alternatively, use a valid regular expression for a maximum flexibility
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
containerRegistries:
|
||||
allowed: []
|
||||
    allowedRegex: "internal.registry.\\w.tld"
|
||||
```
|
||||
|
||||
A Pod running `internal.registry.foo.tld` as registry will be allowed, as well `internal.registry.bar.tld` since these are matching the regular expression.
|
||||
|
||||
> You can also set a catch-all as .* to allow every kind of registry,
|
||||
> that would be the same result of unsetting `containerRegistries` at all
|
||||
|
||||
As per Ingress and Storage classes, also the allowed registries can be inspected from the Tenant's namespace
|
||||
|
||||
```
|
||||
alice@caas# kubectl describe ns oil-production
|
||||
Name: oil-production
|
||||
Labels: capsule.clastix.io/tenant=oil
|
||||
Annotations: capsule.clastix.io/allowed-registries: docker.io
|
||||
capsule.clastix.io/allowed-registries-regexp: ^registry\.internal\.\w+$
|
||||
...
|
||||
```
|
||||
|
||||
# What’s next
|
||||
See how Bill, the cluster admin, can assign Pod Security Policies to Alice's tenant. [Assign Pod Security Policies](./pod-security-policies.md).
|
||||
|
||||
75
docs/operator/use-cases/ingress-classes.md
Normal file
@@ -0,0 +1,75 @@
|
||||
# Assign Ingress Classes
|
||||
An Ingress Controller is used in Kubernetes to publish services and applications outside of the cluster. An Ingress Controller can be provisioned to accept only Ingresses with a given Ingress Class.
|
||||
|
||||
Bill can assign a set of dedicated Ingress Classes to the `oil` tenant to force the applications in the `oil` tenant to be published only by the assigned Ingress Controller:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
ingressClasses:
|
||||
allowed:
|
||||
- oil
|
||||
...
|
||||
```
|
||||
|
||||
It is also possible to use regular expression for assigning Ingress Classes:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
ingressClasses:
|
||||
allowedRegex: "^oil-.*$"
|
||||
...
|
||||
```
|
||||
|
||||
The Capsule controller assures that all Ingresses created in the tenant can use only one of the valid Ingress Classes. Alice, as tenant owner, gets the list of valid Ingress Classes by checking any of her namespaces:
|
||||
|
||||
```
|
||||
alice@caas# kubectl describe ns oil-production
|
||||
Name: oil-production
|
||||
Labels: capsule.clastix.io/tenant=oil
|
||||
Annotations: capsule.clastix.io/ingress-classes: oil
|
||||
capsule.clastix.io/ingress-classes-regexp: ^oil-.*$
|
||||
...
|
||||
```
|
||||
|
||||
Alice creates an Ingress using a valid Ingress Class in the annotation:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: nginx
|
||||
namespace: oil-production
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: oil
|
||||
spec:
|
||||
rules:
|
||||
- host: web.oil-inc.com
|
||||
http:
|
||||
paths:
|
||||
- backend:
|
||||
serviceName: nginx
|
||||
servicePort: 80
|
||||
path: /
|
||||
```
|
||||
|
||||
Any attempt by Alice to use an invalid Ingress Class, e.g. `default`, will fail.
|
||||
|
||||
> The effect of this policy is that the services created in the tenant will be published
|
||||
> only on the Ingress Controller designated by Bill to accept one of the allowed Ingress Classes.
|
||||
|
||||
# What’s next
|
||||
See how Bill, the cluster admin, can assign a set of dedicated ingress hostnames to Alice's tenant. [Assign Ingress Hostnames](./ingress-hostnames.md).
|
||||
65
docs/operator/use-cases/ingress-hostnames.md
Normal file
@@ -0,0 +1,65 @@
|
||||
# Assign Ingress Hostnames
|
||||
Bill can assign a set of dedicated ingress hostnames to the `oil` tenant in order to force the applications in the tenant to be published only using the given hostnames:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
ingressHostnames:
|
||||
allowed:
|
||||
    - "*.oil.acmecorp.com"
|
||||
...
|
||||
```
|
||||
|
||||
It is also possible to use a regular expression for assigning Ingress Hostnames:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
ingressHostnames:
|
||||
allowedRegex: "^oil-acmecorp.*$"
|
||||
...
|
||||
```
|
||||
|
||||
The Capsule controller assures that all Ingresses created in the tenant can use only one of the valid hostnames.
|
||||
|
||||
Alice creates an Ingress using an allowed hostname
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: nginx
|
||||
namespace: oil-production
|
||||
annotations:
|
||||
kubernetes.io/ingress.class: oil
|
||||
spec:
|
||||
rules:
|
||||
- host: web.oil.acmecorp.com
|
||||
http:
|
||||
paths:
|
||||
- path: /
|
||||
pathType: Prefix
|
||||
backend:
|
||||
service:
|
||||
name: nginx
|
||||
port:
|
||||
number: 80
|
||||
```
|
||||
|
||||
|
||||
Any attempt by Alice to use an invalid hostname, e.g. `web.gas.acmecorp.org`, will fail.
|
||||
|
||||
# What’s next
|
||||
See how Bill, the cluster admin, can assign a Storage Class to Alice's tenant. [Assign Storage Classes](./storage-classes.md).
|
||||
110
docs/operator/use-cases/multiple-tenants.md
Normal file
@@ -0,0 +1,110 @@
|
||||
# Assign multiple tenants to an owner
|
||||
In some scenarios, it's likely that a single team is responsible for multiple lines of business. For example, in our sample organization Acme Corp., Alice is responsible for both the Oil and Gas lines of business. And it's likely that Alice requires two different tenants, for example `oil` and `gas`, to keep things isolated.
|
||||
|
||||
By design, the Capsule operator does not permit hierarchy of tenants, since all tenants are at the same levels. However, we can assign the ownership of multiple tenants to the same user or group of users.
|
||||
|
||||
Bill, the cluster admin, creates multiple tenants having `alice` as owner:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
namespaceQuota: 3
|
||||
```
|
||||
|
||||
and
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: gas
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
namespaceQuota: 9
|
||||
```
|
||||
|
||||
So that
|
||||
|
||||
```
|
||||
bill@caas# kubectl get tenants
|
||||
NAME NAMESPACE QUOTA NAMESPACE COUNT OWNER NAME OWNER KIND NODE SELECTOR AGE
|
||||
oil 3 3 alice User 3h
|
||||
gas 9 0 alice User 1m
|
||||
```
|
||||
|
||||
Alternatively, the ownership can be assigned to a group called `oil-and-gas`:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: oil-and-gas
|
||||
kind: Group
|
||||
namespaceQuota: 3
|
||||
```
|
||||
|
||||
and
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: gas
|
||||
spec:
|
||||
owner:
|
||||
name: oil-and-gas
|
||||
kind: Group
|
||||
namespaceQuota: 9
|
||||
```
|
||||
|
||||
So that
|
||||
|
||||
```
|
||||
bill@caas# kubectl get tenants
|
||||
NAME NAMESPACE QUOTA NAMESPACE COUNT OWNER NAME OWNER KIND NODE SELECTOR AGE
|
||||
oil 3 3 oil-and-gas Group 3h
|
||||
gas 9 0 oil-and-gas Group 1m
|
||||
```
|
||||
|
||||
The two tenants still remain isolated from each other in terms of resources assignments, e.g. _ResourceQuota_, _Nodes Pool_, _Storage Classes_ and _Ingress Classes_, and in terms of governance, e.g. _NetworkPolicies_, _PodSecurityPolicies_, _Trusted Registries_, etc.
|
||||
|
||||
|
||||
When Alice logs in CaaS platform, she has access to all namespaces belonging to both the `oil` and `gas` tenants.
|
||||
|
||||
```
|
||||
alice@caas# kubectl create ns oil-production
|
||||
alice@caas# kubectl create ns gas-production
|
||||
```
|
||||
|
||||
When the enforcement of the naming convention with the `--force-tenant-prefix` option is enabled, the namespaces are automatically assigned to the right tenant by Capsule because the operator does a lookup on the tenant names. If the `--force-tenant-prefix` option is not set, Alice needs to specify the tenant name as a label `capsule.clastix.io/tenant=<desired_tenant>` in the namespace manifest:
|
||||
|
||||
```yaml
|
||||
cat <<EOF > gas-production-ns.yaml
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: gas-production
|
||||
labels:
|
||||
capsule.clastix.io/tenant: gas
|
||||
EOF
|
||||
|
||||
kubectl create -f gas-production-ns.yaml
|
||||
```
|
||||
|
||||
> If not specified, Capsule will deny with the following message:
|
||||
>
|
||||
>`Unable to assign namespace to tenant. Please use capsule.clastix.io/tenant label when creating a namespace.`
|
||||
|
||||
# What’s next
|
||||
This ends our tour of Capsule use cases. As we improve Capsule, more use cases about multi-tenancy, policy admission control, and cluster governance will be covered in the future. Stay tuned!
|
||||
103
docs/operator/use-cases/network-policies.md
Normal file
@@ -0,0 +1,103 @@
|
||||
# Assign Network Policies
|
||||
Kubernetes network policies allow controlling network traffic between namespaces and between pods in the same namespace. Bill, the cluster admin, can enforce network traffic isolation between different tenants while leaving to Alice, the tenant owner, the freedom to set isolation between namespaces in the same tenant or even between pods in the same namespace.
|
||||
|
||||
To meet this requirement, Bill needs to define network policies that deny pods belonging to Alice's namespaces to access pods in namespaces belonging to other tenants, e.g. Bob's tenant `water`, or in system namespaces, e.g. `kube-system`.
|
||||
|
||||
Also, Bill can make sure pods belonging to a tenant namespace cannot access other network infrastructure like cluster nodes, load balancers, and virtual machines running other services.
|
||||
|
||||
Bill can set network policies in the tenant manifest, according to the requirements:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
networkPolicies:
|
||||
- policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
egress:
|
||||
- to:
|
||||
- ipBlock:
|
||||
cidr: 0.0.0.0/0
|
||||
except:
|
||||
- 192.168.0.0/16
|
||||
ingress:
|
||||
- from:
|
||||
- namespaceSelector:
|
||||
matchLabels:
|
||||
capsule.clastix.io/tenant: oil
|
||||
- podSelector: {}
|
||||
- ipBlock:
|
||||
cidr: 192.168.0.0/16
|
||||
podSelector: {}
|
||||
```
|
||||
|
||||
The Capsule controller, watching for namespace creation, creates the Network Policies for each namespace in the tenant.
|
||||
|
||||
Alice has access to these network policies:
|
||||
|
||||
```
|
||||
alice@caas# kubectl -n oil-production get networkpolicies
|
||||
NAME POD-SELECTOR AGE
|
||||
capsule-oil-0 <none> 42h
|
||||
```
|
||||
|
||||
Alice can create, patch, and delete additional network policies within her namespaces
|
||||
|
||||
```
|
||||
alice@caas# kubectl -n oil-production auth can-i get networkpolicies
|
||||
yes
|
||||
|
||||
alice@caas# kubectl -n oil-production auth can-i delete networkpolicies
|
||||
yes
|
||||
|
||||
alice@caas# kubectl -n oil-production auth can-i patch networkpolicies
|
||||
yes
|
||||
```
|
||||
|
||||
For example, she can create
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
name: production-network-policy
|
||||
namespace: oil-production
|
||||
spec:
|
||||
podSelector: {}
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
```
|
||||
|
||||
Check all the network policies
|
||||
|
||||
```
|
||||
alice@caas# kubectl -n oil-production get networkpolicies
|
||||
NAME POD-SELECTOR AGE
|
||||
capsule-oil-0 <none> 42h
|
||||
production-network-policy <none> 3m
|
||||
```
|
||||
|
||||
and delete the namespace network policies
|
||||
|
||||
```
|
||||
alice@caas# kubectl -n oil-production delete networkpolicy production-network-policy
|
||||
```
|
||||
|
||||
|
||||
However, the Capsule controller prevents Alice from deleting the tenant network policy:
|
||||
|
||||
```
|
||||
alice@caas# kubectl -n oil-production delete networkpolicy capsule-oil-0
|
||||
Error from server (Capsule Network Policies cannot be deleted: please, reach out the system administrators): admission webhook "validating.network-policy.capsule.clastix.io" denied the request: Capsule Network Policies cannot be deleted: please, reach out the system administrators
|
||||
```
|
||||
|
||||
# What’s next
|
||||
See how Bill, the cluster admin, can assign trusted images registries to Alice's tenant. [Assign Trusted Images Registries](./images-registries.md).
|
||||
53
docs/operator/use-cases/nodes-pool.md
Normal file
@@ -0,0 +1,53 @@
|
||||
# Assign a nodes pool
|
||||
Bill, the cluster admin, can dedicate a pool of worker nodes to the `oil` tenant, to isolate the tenant applications from other noisy neighbors.
|
||||
|
||||
These nodes are labeled by Bill as `pool=oil`
|
||||
|
||||
```
|
||||
bill@caas# kubectl get nodes --show-labels
|
||||
|
||||
NAME STATUS ROLES AGE VERSION LABELS
|
||||
...
|
||||
worker06.acme.com Ready worker 8d v1.18.2 pool=oil
|
||||
worker07.acme.com Ready worker 8d v1.18.2 pool=oil
|
||||
worker08.acme.com Ready worker 8d v1.18.2 pool=oil
|
||||
```
|
||||
|
||||
The label `pool=oil` is defined as node selector in the tenant manifest:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
nodeSelector:
|
||||
pool: oil
|
||||
...
|
||||
```
|
||||
|
||||
The Capsule controller makes sure that any namespace created in the tenant has the annotation: `scheduler.alpha.kubernetes.io/node-selector: pool=oil`. This annotation tells the scheduler of Kubernetes to assign the node selector `pool=oil` to all the pods deployed in the tenant.
|
||||
|
||||
The effect is that all the pods deployed by Alice are placed only on the designated pool of nodes.
|
||||
|
||||
Any tentative of Alice to change the selector on the pods will result in the following error from
|
||||
the `PodNodeSelector` Admission Controller plugin:
|
||||
|
||||
```
|
||||
Error from server (Forbidden): pods "busybox" is forbidden:
|
||||
pod node label selector conflicts with its namespace node label selector
|
||||
```
|
||||
|
||||
RBAC prevents Alice from changing the annotation on the namespace:
|
||||
|
||||
```
|
||||
alice@caas# kubectl auth can-i edit ns -n production
|
||||
Warning: resource 'namespaces' is not namespace scoped
|
||||
no
|
||||
```
|
||||
|
||||
# What’s next
|
||||
See how Bill, the cluster admin, can assign an Ingress Class to Alice's tenant. [Assign Ingress Classes](./ingress-classes.md).
|
||||
96
docs/operator/use-cases/onboarding.md
Normal file
@@ -0,0 +1,96 @@
|
||||
# Onboard a new tenant
|
||||
Bill receives a new request from Acme Corp.'s CTO asking that a new tenant be onboarded for Alice's organization. Bill assigns Alice's identity `alice` in the Acme Corp. identity management system. Because Alice is a tenant owner, Bill needs to assign `alice` the Capsule group defined by the `--capsule-user-group` option, which defaults to `capsule.clastix.io`.
|
||||
|
||||
To keep the things simple, we assume that Bill just creates a client certificate for authentication using X.509 Certificate Signing Request, so Alice's certificate has `"/CN=alice/O=capsule.clastix.io"`.
|
||||
|
||||
Bill creates a new tenant `oil` in the CaaS management portal according to the tenant's profile:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
namespaceQuota: 3
|
||||
```
|
||||
|
||||
Bill checks the new tenant is created and operational:
|
||||
|
||||
```
|
||||
bill@caas# kubectl get tenant oil
|
||||
NAME NAMESPACE QUOTA NAMESPACE COUNT OWNER NAME OWNER KIND NODE SELECTOR AGE
|
||||
oil    3                 0                 alice        User                        3m
|
||||
```
|
||||
|
||||
> Note that namespaces are not yet assigned to the new tenant.
|
||||
> The tenant owners are free to create their namespaces in a self-service fashion
|
||||
> and without any intervention from Bill.
|
||||
|
||||
Once the new tenant `oil` is in place, Bill sends the login credentials to Alice.
|
||||
|
||||
Alice can log in to the CaaS platform and checks if she can create a namespace
|
||||
|
||||
```
|
||||
alice@caas# kubectl auth can-i create namespaces
|
||||
Warning: resource 'namespaces' is not namespace scoped
|
||||
yes
|
||||
```
|
||||
|
||||
or even delete the namespace
|
||||
|
||||
```
|
||||
alice@caas# kubectl auth can-i delete ns -n oil-production
|
||||
Warning: resource 'namespaces' is not namespace scoped
|
||||
yes
|
||||
```
|
||||
|
||||
However, cluster resources are not accessible to Alice
|
||||
|
||||
```
|
||||
alice@caas# kubectl auth can-i get namespaces
|
||||
Warning: resource 'namespaces' is not namespace scoped
|
||||
no
|
||||
|
||||
alice@caas# kubectl auth can-i get nodes
|
||||
Warning: resource 'nodes' is not namespace scoped
|
||||
no
|
||||
|
||||
alice@caas# kubectl auth can-i get persistentvolumes
|
||||
Warning: resource 'persistentvolumes' is not namespace scoped
|
||||
no
|
||||
```
|
||||
|
||||
including the `Tenant` resources
|
||||
|
||||
```
|
||||
alice@caas# kubectl auth can-i get tenants
|
||||
Warning: resource 'tenants' is not namespace scoped
|
||||
no
|
||||
```
|
||||
|
||||
## Assign a group of users as tenant owner
|
||||
In the example above, Bill assigned the ownership of the `oil` tenant to the `alice` user. However, it is more likely that multiple users in Alice's organization need to admin the `oil` tenant. In such cases, Bill can assign the ownership of the `oil` tenant to a group of users instead of a single one.
|
||||
|
||||
Bill creates a new group account `oil` in the Acme Corp. identity management system and then he assigns Alice's identity `alice` to the `oil` group.
|
||||
|
||||
The tenant manifest is modified as in the following:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: oil
|
||||
kind: Group
|
||||
namespaceQuota: 3
|
||||
```
|
||||
|
||||
With the snippet above, any user belonging to the Alice's organization will be owner of the `oil` tenant with the same permissions of Alice.
|
||||
|
||||
# What’s next
|
||||
See how Alice, the tenant owner, creates new namespaces. [Create namespaces](./create-namespaces.md).
|
||||
43
docs/operator/use-cases/overview.md
Normal file
@@ -0,0 +1,43 @@
|
||||
# Use cases for Capsule
|
||||
Using Capsule, a cluster admin can implement complex multi-tenant scenarios for both public and private deployments. Here is a list of common scenarios addressed by Capsule.
|
||||
|
||||
# Container as a Service (CaaS)
|
||||
***Acme Corp***, our sample organization, built a Container as a Service platform (CaaS), based on Kubernetes, to serve multiple lines of business. Each line of business, has its own team of engineers that are responsible for development, deployment, and operating their digital products.
|
||||
|
||||
To simplify the usage of Capsule in this scenario, we'll work with the following actors:
|
||||
|
||||
* ***Bill***:
|
||||
he is the cluster administrator from the operations department of Acme Corp. and he is in charge of admin and maintains the CaaS platform.
|
||||
|
||||
* ***Alice***:
|
||||
she works as IT Project Leader at Oil & Gas Business Units, two new lines of business at Acme Corp. Alice is responsible for all the strategic IT projects and she is responsible also for a team made of different background (developers, administrators, SRE engineers, etc.) and organized in separate departments.
|
||||
|
||||
* ***Joe***:
|
||||
he works at Acme Corp, as a lead developer of a distributed team in Alice's organization.
|
||||
Joe is responsible for developing a mission-critical project in the Oil market.
|
||||
|
||||
* ***Bob***:
|
||||
he is the head of Engineering for the Water Business Unit, the main and historical line of business at Acme Corp. He is responsible for development, deployment, and operating multiple digital products in production for a large set of customers.
|
||||
|
||||
Bill, at Acme Corp. can use Capsule to address any of the following scenarios:
|
||||
|
||||
* [Onboard a new tenant](./onboarding.md)
|
||||
* [Create namespaces](./create-namespaces.md)
|
||||
* [Assign permissions](./permissions.md)
|
||||
* [Enforce resources quota and limits](./resources-quota-limits.md)
|
||||
* [Assign a nodes pool](./nodes-pool.md)
|
||||
* [Assign Ingress Classes](./ingress-classes.md)
|
||||
* [Assign Ingress Hostnames](./ingress-hostnames.md)
|
||||
* [Assign Storage Classes](./storage-classes.md)
|
||||
* [Assign Network Policies](./network-policies.md)
|
||||
* [Assign Trusted Images Registries](./images-registries.md)
|
||||
* [Assign Pod Security Policies](./pod-security-policies.md)
|
||||
* [Create Custom Resources](./custom-resources.md)
|
||||
* [Taint namespaces](./taint-namespaces.md)
|
||||
* [Assign multiple tenants to an owner](./multiple-tenants.md)
|
||||
|
||||
> NB: as we improve Capsule, more use cases about multi-tenancy and cluster governance will be covered.
|
||||
|
||||
|
||||
# What’s next
|
||||
See how the cluster admin puts a new tenant onboard. [Onboard a new tenant](./onboarding.md).
|
||||
43
docs/operator/use-cases/permissions.md
Normal file
@@ -0,0 +1,43 @@
|
||||
# Assign permissions
|
||||
Alice acts as the tenant admin. Other users can operate inside the tenant with different levels of permissions and authorizations. Alice is responsible for creating additional roles and assigning these roles to other users to work in the same tenant.
|
||||
|
||||
One of the key design principles of Capsule is self-provisioning management from the tenant owner's perspective. Alice, the tenant owner, does not need to interact with Bill, the cluster admin, to complete her day-by-day duties. On the other side, Bill does not have to deal with the multiple requests coming from multiple tenant owners that would probably overwhelm him.
|
||||
|
||||
Capsule leaves Alice the freedom to create RBAC roles at the namespace level, or to use the pre-defined cluster roles already available in Kubernetes, and assign them to other users in the tenant. Roles and RoleBindings being limited to a namespace scope, Alice can assign the roles to the other users accessing the same tenant only after the namespace is created. This gives Alice the power to administer the tenant without the intervention of the cluster admin.
|
||||
|
||||
From the cluster admin perspective, the only required action to Bill is to provision the other identities, eg. `joe` in the Identity Management system of Acme Corp. But this task can be done once, when onboarding the tenant and the users accessing the tenant can be part of the tenant business profile.
|
||||
|
||||
Alice can create Roles and RoleBindings only in the namespaces she owns
|
||||
|
||||
```
|
||||
alice@caas# kubectl auth can-i get roles -n oil-development
|
||||
yes
|
||||
|
||||
alice@caas# kubectl auth can-i get rolebindings -n oil-development
|
||||
yes
|
||||
|
||||
```
|
||||
|
||||
so she can assign the role of namespace `oil-development` admin to Joe, another user accessing the tenant `oil`
|
||||
|
||||
```yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
name: oil-development:admin
|
||||
namespace: oil-development
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: admin
|
||||
subjects:
|
||||
- apiGroup: rbac.authorization.k8s.io
|
||||
kind: User
|
||||
name: joe
|
||||
```
|
||||
|
||||
Joe now can operate on the namespace `oil-development` as admin but he has no access to the other namespaces `oil-production`, and `oil-test` that are part of the same tenant.
|
||||
|
||||
# What’s next
|
||||
See how Bill, the cluster admin, set resources quota and limits for Alice's tenant. [Enforce resources quota and limits](./resources-quota-limits.md).
|
||||
76
docs/operator/use-cases/pod-security-policies.md
Normal file
@@ -0,0 +1,76 @@
|
||||
# Assign Pod Security Policies
|
||||
Bill, the cluster admin, can assign a dedicated Pod Security Policy (PSP) to the Alice's tenant. This is likely to be a requirement in a multi-tenancy environment.
|
||||
|
||||
The cluster admin creates a PSP:
|
||||
|
||||
```yaml
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: psp:restricted
|
||||
spec:
|
||||
privileged: false
|
||||
# Required to prevent escalations to root.
|
||||
allowPrivilegeEscalation: false
|
||||
...
|
||||
```
|
||||
|
||||
Then create a _ClusterRole_ using or granting the said item
|
||||
|
||||
```yaml
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: psp:restricted
|
||||
rules:
|
||||
- apiGroups: ['policy']
|
||||
resources: ['podsecuritypolicies']
|
||||
resourceNames: ['psp:restricted']
|
||||
verbs: ['use']
|
||||
```
|
||||
|
||||
Bill can assign this role to any namespace in the Alice's tenant by setting it in the tenant manifest:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
additionalRoleBindings:
|
||||
- clusterRoleName: psp:privileged
|
||||
subjects:
|
||||
- kind: "Group"
|
||||
apiGroup: "rbac.authorization.k8s.io"
|
||||
name: "system:authenticated"
|
||||
...
|
||||
```
|
||||
|
||||
With the given specification, Capsule will ensure that all Alice's namespaces will contain a _RoleBinding_ for the specified _Cluster Role_. For example, in the `oil-production` namespace, Alice will see:
|
||||
|
||||
```yaml
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: 'capsule-oil-psp:privileged'
|
||||
namespace: oil-production
|
||||
labels:
|
||||
capsule.clastix.io/role-binding: a10c4c8c48474963
|
||||
capsule.clastix.io/tenant: oil
|
||||
subjects:
|
||||
- kind: Group
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
name: 'system:authenticated'
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: 'psp:privileged'
|
||||
```
|
||||
|
||||
With the above example, Capsule forbids any authenticated user in the `oil-production` namespace to run privileged pods or to perform privilege escalation, as declared by the Cluster Role `psp:privileged`.
|
||||
|
||||
# What’s next
|
||||
See how Bill, the cluster admin, can assign to Alice the permissions to create custom resources in her tenant. [Create Custom Resources](./custom-resources.md).
|
||||
213
docs/operator/use-cases/resources-quota-limits.md
Normal file
@@ -0,0 +1,213 @@
|
||||
# Enforce resources quota and limits
|
||||
With help of Capsule, Bill, the cluster admin, can set and enforce resources quota and limits for the Alice's tenant
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
resourceQuotas:
|
||||
- hard:
|
||||
limits.cpu: "8"
|
||||
limits.memory: 16Gi
|
||||
requests.cpu: "8"
|
||||
requests.memory: 16Gi
|
||||
scopes:
|
||||
- NotTerminating
|
||||
- hard:
|
||||
pods: "100"
|
||||
services: "50"
|
||||
- hard:
|
||||
requests.storage: 10Gi
|
||||
...
|
||||
```
|
||||
|
||||
The resources quotas above will be inherited by all the namespaces created by Alice. In our case, when Alice creates the namespace `oil-production`, Capsule creates three resource quotas:
|
||||
|
||||
```yaml
|
||||
kind: ResourceQuota
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: compute
|
||||
namespace: oil-production
|
||||
labels:
|
||||
tenant: oil
|
||||
spec:
|
||||
hard:
|
||||
limits.cpu: "8"
|
||||
limits.memory: 16Gi
|
||||
requests.cpu: "8"
|
||||
requests.memory: 16Gi
|
||||
scopes: ["NotTerminating"]
|
||||
---
|
||||
kind: ResourceQuota
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: count
|
||||
namespace: oil-production
|
||||
labels:
|
||||
tenant: oil
|
||||
spec:
|
||||
hard:
|
||||
pods: "10"
|
||||
---
|
||||
kind: ResourceQuota
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: storage
|
||||
namespace: oil-production
|
||||
labels:
|
||||
tenant: oil
|
||||
spec:
|
||||
hard:
|
||||
requests.storage: "10Gi"
|
||||
```
|
||||
|
||||
Alice can create any resource according to the assigned quotas:
|
||||
|
||||
```
|
||||
alice@caas# kubectl -n oil-production create deployment nginx --image=nginx:latest
|
||||
```
|
||||
|
||||
To check the remaining resources in the `oil-production` namespace, she gets the ResourceQuota:
|
||||
|
||||
```
|
||||
alice@caas# kubectl -n oil-production get resourcequota
|
||||
NAME AGE REQUEST LIMIT
|
||||
capsule-oil-0 42h requests.cpu: 1/8, requests.memory: 1/16Gi limits.cpu: 1/8, limits.memory: 1/16Gi
|
||||
capsule-oil-1 42h pods: 1/10
|
||||
capsule-oil-2   42h   requests.storage: 0/10Gi
|
||||
```
|
||||
|
||||
By inspecting the annotations in ResourceQuota, Alice can see the used resources at tenant level and the related hard quota:
|
||||
|
||||
```yaml
|
||||
alice@caas# kubectl get resourcequotas capsule-oil-1 -o yaml
|
||||
apiVersion: v1
|
||||
kind: ResourceQuota
|
||||
metadata:
|
||||
annotations:
|
||||
quota.capsule.clastix.io/used-pods: "1"
|
||||
quota.capsule.clastix.io/hard-pods: "10"
|
||||
...
|
||||
```
|
||||
|
||||
At the tenant level, the Capsule controller watches the resources usage for each Tenant namespace and adjusts it as an aggregate of all the namespaces using the said annotations. When the aggregate usage reaches the hard quota, then the native `ResourceQuota` Admission Controller in Kubernetes denies the Alice's request.
|
||||
|
||||
Bill, the cluster admin, can also set Limit Ranges for each namespace in the Alice's tenant by defining limits in the tenant spec:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
limitRanges:
|
||||
- limits:
|
||||
- max:
|
||||
cpu: "1"
|
||||
memory: 1Gi
|
||||
min:
|
||||
cpu: 50m
|
||||
memory: 5Mi
|
||||
type: Pod
|
||||
- default:
|
||||
cpu: 200m
|
||||
memory: 100Mi
|
||||
defaultRequest:
|
||||
cpu: 100m
|
||||
memory: 10Mi
|
||||
max:
|
||||
cpu: "1"
|
||||
memory: 1Gi
|
||||
min:
|
||||
cpu: 50m
|
||||
memory: 5Mi
|
||||
type: Container
|
||||
- max:
|
||||
storage: 10Gi
|
||||
min:
|
||||
storage: 1Gi
|
||||
type: PersistentVolumeClaim
|
||||
...
|
||||
```
|
||||
|
||||
Limits will be inherited by all the namespaces created by Alice. In our case, when Alice creates the namespace `oil-production`, Capsule creates the following:
|
||||
|
||||
```yaml
|
||||
kind: LimitRange
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: limits
|
||||
namespace: oil-production
|
||||
labels:
|
||||
tenant: oil
|
||||
spec:
|
||||
limits:
|
||||
- type: Pod
|
||||
min:
|
||||
cpu: "50m"
|
||||
memory: "5Mi"
|
||||
max:
|
||||
cpu: "1"
|
||||
memory: "1Gi"
|
||||
- type: Container
|
||||
defaultRequest:
|
||||
cpu: "100m"
|
||||
memory: "10Mi"
|
||||
default:
|
||||
cpu: "200m"
|
||||
memory: "100Mi"
|
||||
min:
|
||||
cpu: "50m"
|
||||
memory: "5Mi"
|
||||
max:
|
||||
cpu: "1"
|
||||
memory: "1Gi"
|
||||
- type: PersistentVolumeClaim
|
||||
min:
|
||||
storage: "1Gi"
|
||||
max:
|
||||
storage: "10Gi"
|
||||
```
|
||||
|
||||
Alice can inspect Limit Ranges for her namespaces:
|
||||
|
||||
```
|
||||
alice@caas# kubectl -n oil-production get limitranges
|
||||
NAME CREATED AT
|
||||
capsule-oil-0 2020-07-20T18:41:15Z
|
||||
|
||||
# kubectl -n oil-production describe limitranges limits
|
||||
Name: capsule-oil-0
|
||||
Namespace: oil-production
|
||||
Type Resource Min Max Default Request Default Limit Max Limit/Request Ratio
|
||||
---- -------- --- --- --------------- ------------- -----------------------
|
||||
Pod cpu 50m 1 - - -
|
||||
Pod memory 5Mi 1Gi - - -
|
||||
Container cpu 50m 1 100m 200m -
|
||||
Container memory 5Mi 1Gi 10Mi 100Mi -
|
||||
PersistentVolumeClaim storage 1Gi 10Gi - - -
|
||||
```
|
||||
|
||||
Being the limit range specific of single resources, there is no aggregate to count.
|
||||
|
||||
Having access to resource quota and limits, however, Alice is not able to change or delete it according to the assigned RBAC profile.
|
||||
|
||||
```
|
||||
alice@caas# kubectl -n oil-production auth can-i patch resourcequota
|
||||
no - no RBAC policy matched
|
||||
|
||||
alice@caas# kubectl -n oil-production auth can-i patch limitranges
|
||||
no - no RBAC policy matched
|
||||
```
|
||||
|
||||
# What’s next
|
||||
See how Bill, the cluster admin, can assign a pool of nodes to Alice's tenant. [Assign a nodes pool](./nodes-pool.md).
|
||||
74
docs/operator/use-cases/storage-classes.md
Normal file
@@ -0,0 +1,74 @@
|
||||
# Assign Storage Classes
|
||||
Acme Corp. can provide persistent storage infrastructure to their tenants. Different types of storage requirements, with different levels of QoS, e.g. SSD versus HDD, are available for different tenants according to the tenant's profile. To meet these different requirements, Bill, the cluster admin, can provision different Storage Classes and assign them to the tenant:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
storageClasses:
|
||||
allowed:
|
||||
- ceph-rbd
|
||||
- ceph-nfs
|
||||
...
|
||||
```
|
||||
|
||||
It is also possible to use a regular expression for assigning Storage Classes:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
storageClasses:
|
||||
allowedRegex: "^ceph-.*$"
|
||||
...
|
||||
```
|
||||
|
||||
Alice, as tenant owner, gets the list of valid Storage Classes by checking any of her namespaces:
|
||||
|
||||
```
|
||||
alice@caas# kubectl describe ns oil-production
|
||||
Name: oil-production
|
||||
Labels: capsule.clastix.io/tenant=oil
|
||||
Annotations: capsule.clastix.io/storage-classes: ceph-rbd,ceph-nfs
|
||||
capsule.clastix.io/storage-classes-regexp: ^ceph-.*$
|
||||
...
|
||||
```
|
||||
|
||||
The Capsule controller will ensure that all Persistent Volume Claims created by Alice will use only one of the assigned storage classes:
|
||||
|
||||
For example:
|
||||
|
||||
```yaml
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: pvc
|
||||
namespace: oil-production
|
||||
spec:
|
||||
storageClassName: ceph-rbd
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 12Gi
|
||||
```
|
||||
|
||||
Any attempt by Alice to use a non-valid Storage Class, e.g. `default`, will fail:
|
||||
```
|
||||
Error from server: error when creating persistent volume claim pvc:
|
||||
admission webhook "pvc.capsule.clastix.io" denied the request:
|
||||
Storage Class default is forbidden for the current Tenant
|
||||
```
|
||||
|
||||
# What’s next
|
||||
See how Bill, the cluster admin, can assign Network Policies to Alice's tenant. [Assign Network Policies](./network-policies.md).
|
||||
51
docs/operator/use-cases/taint-namespaces.md
Normal file
@@ -0,0 +1,51 @@
|
||||
# Taint namespaces
|
||||
With Capsule, Bill can _"taint"_ the namespaces created by Alice with additional labels and/or annotations. There is no specific semantic assigned to these labels and annotations: they will just be assigned to the namespaces in the tenant as they are created by Alice. This can help the cluster admin to implement specific use cases. For example, it can be used to implement backup as a service for namespaces in the tenant.
|
||||
|
||||
Bill assigns an additional label to the `oil` tenant to force the backup system to take care of Alice's namespaces:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
namespacesMetadata:
|
||||
additionalLabels:
|
||||
capsule.clastix.io/backup: "true"
|
||||
```
|
||||
|
||||
or by annotations:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1alpha1
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owner:
|
||||
name: alice
|
||||
kind: User
|
||||
namespacesMetadata:
|
||||
additionalAnnotations:
|
||||
capsule.clastix.io/do_stuff: backup
|
||||
```
|
||||
|
||||
When Alice creates a namespace, this will inherit the given label and/or annotation:
|
||||
|
||||
```yaml
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: oil-production
|
||||
labels:
|
||||
capsule.clastix.io/backup: "true" # here the additional label
|
||||
capsule.clastix.io/tenant: oil
|
||||
annotations:
|
||||
capsule.clastix.io/do_stuff: backup # here the additional annotation
|
||||
```
|
||||
|
||||
# What’s next
|
||||
See how Bill, the cluster admin, can assign multiple tenants to Alice. [Assign multiple tenants to an owner](./multiple-tenants.md).
|
||||
67
docs/proxy/overview.md
Normal file
@@ -0,0 +1,67 @@
|
||||
# Capsule Proxy
|
||||
Capsule Proxy is an add-on for the Capsule Operator.
|
||||
|
||||
## The problem
|
||||
Kubernetes RBAC lacks the ability to list only the owned cluster-scoped resources since there are no ACL-filtered APIs. For example:
|
||||
|
||||
```
|
||||
$ kubectl get namespaces
|
||||
Error from server (Forbidden): namespaces is forbidden:
|
||||
User "alice" cannot list resource "namespaces" in API group "" at the cluster scope
|
||||
```
|
||||
|
||||
However, the user can have permissions on some namespaces
|
||||
|
||||
```
|
||||
$ kubectl auth can-i [get|list|watch|delete] ns oil-production
|
||||
yes
|
||||
```
|
||||
|
||||
The reason, as the error message reported, is that the RBAC _list_ action is available only at Cluster-Scope and it is not granted to users without appropriate permissions.
|
||||
|
||||
To overcome this problem, many Kubernetes distributions introduced mirrored custom resources supported by a custom set of ACL-filtered APIs. However, this leads to radically changing the user's experience of Kubernetes by introducing hard customizations that make it painful to move from one distribution to another.
|
||||
|
||||
With **Capsule**, we took a different approach. As one of the key goals, we want to keep the same user's experience on all the distributions of Kubernetes. We want people to use the standard tools they already know and love and it should just work.
|
||||
|
||||
## How it works
|
||||
This project is an add-on of the Capsule Operator, so make sure you have a working instance of Capsule before attempting to install it. Use the `capsule-proxy` only if you want Tenant Owners to list their own Cluster-Scope resources.
|
||||
|
||||
The `capsule-proxy` implements a simple reverse proxy that intercepts only specific requests to the APIs server and Capsule does all the magic behind the scenes.
|
||||
|
||||
The current implementation only filters two types of requests:
|
||||
|
||||
* `api/v1/namespaces`
|
||||
* `api/v1/nodes`
|
||||
|
||||
All other requests are proxied transparently to the APIs server, so no side-effects are expected. We're planning to add new APIs in the future, so PRs are welcome!
|
||||
|
||||
## Installation
|
||||
The `capsule-proxy` can be deployed in standalone mode, e.g. running as a pod bridging any Kubernetes client to the APIs server. Optionally, it can be deployed as sidecar container in the backend of a dashboard.
|
||||
|
||||
An Helm Chart is available [here](https://github.com/clastix/capsule-proxy).
|
||||
|
||||
## Does it work with kubectl?
|
||||
Yes, it works by intercepting all the requests from the `kubectl` client directed to the APIs server. It works with both users who use the TLS certificate authentication and those who use OIDC.
|
||||
|
||||
As tenant owner `alice`, you are able to use `kubectl` to create some namespaces:
|
||||
```
|
||||
$ kubectl --context alice-oidc@mycluster create namespace oil-production
|
||||
$ kubectl --context alice-oidc@mycluster create namespace oil-development
|
||||
$ kubectl --context alice-oidc@mycluster create namespace gas-marketing
|
||||
```
|
||||
|
||||
and list only those namespaces:
|
||||
```
|
||||
$ kubectl --context alice-oidc@mycluster get namespaces
|
||||
NAME STATUS AGE
|
||||
gas-marketing Active 2m
|
||||
oil-development Active 2m
|
||||
oil-production Active 2m
|
||||
```
|
||||
|
||||
|
||||
# What’s next
|
||||
Have a fun with `capsule-proxy`:
|
||||
|
||||
* [Standalone Installation](./standalone.md)
|
||||
* [Sidecar Installation](./sidecar.md)
|
||||
118
docs/proxy/sidecar.md
Normal file
@@ -0,0 +1,118 @@
|
||||
# Sidecar Installation
|
||||
The `capsule-proxy` can be deployed as sidecar container for server-side Kubernetes dashboards. It will intercept all requests sent from the client side to the server-side of the dashboard and it will proxy them to the Kubernetes APIs server.
|
||||
|
||||
```
|
||||
capsule-proxy
|
||||
+------------+ +------------+
|
||||
|:9001 +------->|:6443 |
|
||||
+------------+ +------------+
|
||||
+-----------+ | | kube-apiserver
|
||||
browser +------>+:443 +-------->+:8443 |
|
||||
+-----------+ +------------+
|
||||
ingress-controller dashboard backend
|
||||
(ssl-passthrough)
|
||||
```
|
||||
|
||||
In order to use this pattern, the server-side backend of your dashboard must permit to specify the URL of the Kubernetes APIs server. For example, the following manifest contains an excerpt for deploying with [Kubernetes Dashboard](https://github.com/kubernetes/dashboard), and the Ingress Controller in ssl-passthrough mode.
|
||||
|
||||
Place the `capsule-proxy` in a pod with SSL mode, i.e. `--enable-ssl=true` and passing valid certificate and key files in a secret.
|
||||
|
||||
```yaml
|
||||
...
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kubernetes-dashboard
|
||||
spec:
|
||||
containers:
|
||||
- name: ns-filter
|
||||
image: quay.io/clastix/capsule-proxy
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /capsule-proxy
|
||||
- --k8s-control-plane-url=https://kubernetes.default.svc
|
||||
- --capsule-user-group=capsule.clastix.io
|
||||
- --zap-log-level=5
|
||||
- --enable-ssl=true
|
||||
- --ssl-cert-path=/opt/certs/tls.crt
|
||||
- --ssl-key-path=/opt/certs/tls.key
|
||||
volumeMounts:
|
||||
- name: ns-filter-certs
|
||||
mountPath: /opt/certs
|
||||
ports:
|
||||
- containerPort: 9001
|
||||
name: http
|
||||
protocol: TCP
|
||||
...
|
||||
```
|
||||
|
||||
In the same pod, place the Kubernetes Dashboard in _"out-of-cluster"_ mode with `--apiserver-host=https://localhost:9001` to send all the requests to the `capsule-proxy` sidecar container:
|
||||
|
||||
|
||||
```yaml
|
||||
...
|
||||
- name: dashboard
|
||||
image: kubernetesui/dashboard:v2.0.4
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 8443
|
||||
protocol: TCP
|
||||
args:
|
||||
- --auto-generate-certificates
|
||||
- --namespace=cmp-system
|
||||
- --tls-cert-file=tls.crt
|
||||
- --tls-key-file=tls.key
|
||||
- --apiserver-host=https://localhost:9001
|
||||
- --kubeconfig=/opt/.kube/config
|
||||
volumeMounts:
|
||||
- name: kubernetes-dashboard-certs
|
||||
mountPath: /certs
|
||||
- mountPath: /tmp
|
||||
name: tmp-volume
|
||||
- mountPath: /opt/.kube
|
||||
name: kubeconfig
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
scheme: HTTPS
|
||||
path: /
|
||||
port: 8443
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 30
|
||||
...
|
||||
```
|
||||
|
||||
Make sure you pass a valid `kubeconfig` file to the dashboard pointing to the `capsule-proxy` sidecar container instead of the `kube-apiserver` directly:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubernetes-dashboard-kubeconfig
|
||||
namespace: kubernetes-dashboard
|
||||
data:
|
||||
config: |
|
||||
kind: Config
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
insecure-skip-tls-verify: true
|
||||
server: https://localhost:9001 # <- point to the capsule-proxy
|
||||
name: localhost
|
||||
contexts:
|
||||
- context:
|
||||
cluster: localhost
|
||||
user: kubernetes-admin # <- dashboard has cluster-admin permissions
|
||||
name: admin@localhost
|
||||
current-context: admin@localhost
|
||||
preferences: {}
|
||||
users:
|
||||
- name: kubernetes-admin
|
||||
user:
|
||||
client-certificate-data: REDACTED
|
||||
client-key-data: REDACTED
|
||||
```
|
||||
|
||||
After starting the dashboard, login as a Tenant Owner user, e.g. `alice` according to the used authentication method, and check you can see only owned namespaces.
|
||||
|
||||
The `capsule-proxy` can be deployed in standalone mode, in order to be used with a command line tools like `kubectl`. See [Standalone Installation](./standalone.md).
|
||||
|
||||
222
docs/proxy/standalone.md
Normal file
@@ -0,0 +1,222 @@
|
||||
# Standalone Installation
|
||||
The `capsule-proxy` can be deployed in standalone mode, e.g. running as a pod bridging any Kubernetes client to the `kube-apiserver`. Use this way to provide access to client-side command line tools like `kubectl` or even client-side dashboards.
|
||||
|
||||
You can use an Ingress Controller to expose the `capsule-proxy` endpoint in SSL passthrough mode, or, depending on your environment, you can expose it with either a `NodePort`, or a `LoadBalancer` service. As further alternatives, use `HostPort` or `HostNetwork` mode.
|
||||
|
||||
```
|
||||
+-----------+ +-----------+ +-----------+
|
||||
kubectl ------>|:443 |--------->|:9001 |-------->|:6443 |
|
||||
+-----------+ +-----------+ +-----------+
|
||||
ingress-controller capsule-proxy kube-apiserver
|
||||
(ssl-passthrough)
|
||||
```
|
||||
|
||||
## Configure Capsule
|
||||
Make sure to have a working instance of the Capsule Operator in your Kubernetes cluster before attempting to use `capsule-proxy`. Please, refer to the Capsule Operator [documentation](../operator/overview.md) for instructions.
|
||||
|
||||
You should also have one or more tenants defined, e.g. `oil` and `gas` and they are assigned to the user `alice`.
|
||||
|
||||
As cluster admin, check there are the tenants:
|
||||
|
||||
```
|
||||
$ kubectl get tenants
|
||||
NAME NAMESPACE QUOTA NAMESPACE COUNT OWNER NAME OWNER KIND AGE
|
||||
foo 3 1 joe User 4d
|
||||
gas 3 0 alice User 1d
|
||||
oil 9 0 alice User 1d
|
||||
```
|
||||
|
||||
## Install Capsule Proxy
|
||||
Create a secret in the target namespace containing the SSL certificate which `capsule-proxy` will use.
|
||||
|
||||
```
|
||||
$ kubectl -n capsule-system create secret tls capsule-proxy --cert=tls.cert --key=tls.key
|
||||
```
|
||||
|
||||
Then use the Helm Chart to install the `capsule-proxy` in such namespace:
|
||||
|
||||
```bash
|
||||
$ cat <<EOF | sudo tee custom-values.yaml
|
||||
options:
|
||||
enableSSL: true
|
||||
ingress:
|
||||
enabled: true
|
||||
annotations:
|
||||
ingress.kubernetes.io/ssl-passthrough: 'true'
|
||||
hosts:
|
||||
- host: kube.clastix.io
|
||||
paths: [ "/" ]
|
||||
EOF
|
||||
|
||||
$ helm install capsule-proxy capsule-proxy \
|
||||
--values custom-values.yaml \
|
||||
-n capsule-system
|
||||
```
|
||||
|
||||
The `capsule-proxy` should be exposed with an Ingress in SSL passthrough mode and reachable at `https://kube.clastix.io`.
|
||||
|
||||
## TLS Client Authentication
|
||||
Users using a TLS client-based authentication with certificate and key are able to talk with `capsule-proxy` since the current implementation of the reverse proxy is able to forward client certificates to the Kubernetes APIs server.
|
||||
|
||||
## OIDC Authentication
|
||||
The `capsule-proxy` works with `kubectl` users with a token-based authentication, e.g. OIDC or Bearer Token. In the following example, we'll use Keycloak as an OIDC server capable of providing JWT tokens.
|
||||
|
||||
### Configuring Keycloak
|
||||
Configure Keycloak as OIDC server:
|
||||
|
||||
- Add a realm called `caas`, or use any existing realm instead
|
||||
- Add a group `capsule.clastix.io`
|
||||
- Add a user `alice` assigned to group `capsule.clastix.io`
|
||||
- Add an OIDC client called `kubernetes`
|
||||
- For the `kubernetes` client, create protocol mappers called `groups` and `audience`
|
||||
|
||||
If everything is done correctly, now you should be able to authenticate in Keycloak and see user groups in JWT tokens. Use the following snippet to authenticate in Keycloak as `alice` user:
|
||||
|
||||
```
|
||||
$ KEYCLOAK=sso.clastix.io
|
||||
$ REALM=caas
|
||||
$ OIDC_ISSUER=${KEYCLOAK}/auth/realms/${REALM}
|
||||
|
||||
$ curl -k -s https://${OIDC_ISSUER}/protocol/openid-connect/token \
|
||||
-d grant_type=password \
|
||||
-d response_type=id_token \
|
||||
-d scope=openid \
|
||||
-d client_id=${OIDC_CLIENT_ID} \
|
||||
-d client_secret=${OIDC_CLIENT_SECRET} \
|
||||
-d username=${USERNAME} \
|
||||
-d password=${PASSWORD} | jq
|
||||
```
|
||||
|
||||
The result will include an `ACCESS_TOKEN`, a `REFRESH_TOKEN`, and an `ID_TOKEN`. The access-token can generally be disregarded for Kubernetes. It would be used if the identity provider was managing roles and permissions for the users but that is done in Kubernetes itself with RBAC. The id-token is short lived while the refresh-token has longer expiration. The refresh-token is used to fetch a new id-token when the id-token expires.
|
||||
|
||||
```json
|
||||
{
|
||||
"access_token":"ACCESS_TOKEN",
|
||||
"refresh_token":"REFRESH_TOKEN",
|
||||
"id_token": "ID_TOKEN",
|
||||
"token_type":"bearer",
|
||||
"scope": "openid groups profile email"
|
||||
}
|
||||
```
|
||||
|
||||
To introspect the `ID_TOKEN` token run:
|
||||
```
|
||||
$ curl -k -s https://${OIDC_ISSUER}/protocol/openid-connect/introspect \
|
||||
-d token=${ID_TOKEN} \
|
||||
--user ${OIDC_CLIENT_ID}:${OIDC_CLIENT_SECRET} | jq
|
||||
```
|
||||
|
||||
The result will be like the following:
|
||||
|
||||
```json
|
||||
{
|
||||
"exp": 1601323086,
|
||||
"iat": 1601322186,
|
||||
"aud": "kubernetes",
|
||||
"typ": "ID",
|
||||
"azp": "kubernetes",
|
||||
"preferred_username": "alice",
|
||||
"email_verified": false,
|
||||
"acr": "1",
|
||||
"groups": [
|
||||
"capsule.clastix.io"
|
||||
],
|
||||
"client_id": "kubernetes",
|
||||
"username": "alice",
|
||||
"active": true
|
||||
}
|
||||
```
|
||||
|
||||
### Configuring Kubernetes API Server
|
||||
Configuring Kubernetes for OIDC Authentication requires adding several parameters to the API Server. Please, refer to the [documentation](https://kubernetes.io/docs/reference/access-authn-authz/authentication/#openid-connect-tokens) for details and examples. Most likely, your `kube-apiserver.yaml` manifest will looks like the following:
|
||||
|
||||
```yaml
|
||||
spec:
|
||||
containers:
|
||||
- command:
|
||||
- kube-apiserver
|
||||
...
|
||||
- --oidc-issuer-url=https://${OIDC_ISSUER}
|
||||
- --oidc-ca-file=/etc/kubernetes/oidc/ca.crt
|
||||
- --oidc-client-id=${OIDC_CLIENT_ID}
|
||||
- --oidc-username-claim=preferred_username
|
||||
- --oidc-groups-claim=groups
|
||||
- --oidc-username-prefix=-
|
||||
```
|
||||
|
||||
### Configuring kubectl
|
||||
There are two options to use `kubectl` with OIDC:
|
||||
|
||||
- OIDC Authenticator
|
||||
- Use the `--token` option
|
||||
|
||||
To use the OIDC Authenticator, add an `oidc` user entry to your `kubeconfig` file:
|
||||
```
|
||||
$ kubectl config set-credentials oidc \
|
||||
--auth-provider=oidc \
|
||||
--auth-provider-arg=idp-issuer-url=https://${OIDC_ISSUER} \
|
||||
--auth-provider-arg=idp-certificate-authority=/path/to/ca.crt \
|
||||
--auth-provider-arg=client-id=${OIDC_CLIENT_ID} \
|
||||
--auth-provider-arg=client-secret=${OIDC_CLIENT_SECRET} \
|
||||
--auth-provider-arg=refresh-token=${REFRESH_TOKEN} \
|
||||
--auth-provider-arg=id-token=${ID_TOKEN} \
|
||||
--auth-provider-arg=extra-scopes=groups
|
||||
```
|
||||
|
||||
To use the --token option:
|
||||
```
|
||||
$ kubectl config set-credentials oidc --token=${ID_TOKEN}
|
||||
```
|
||||
|
||||
Point the kubectl to the URL where the `capsule-proxy` service is reachable:
|
||||
```
|
||||
$ kubectl config set-cluster mycluster \
|
||||
--server=https://kube.clastix.io \
|
||||
--certificate-authority=~/.kube/ca.crt
|
||||
```
|
||||
|
||||
Create a new context for the OIDC authenticated users:
|
||||
```
|
||||
$ kubectl config set-context alice-oidc@mycluster \
|
||||
--cluster=mycluster \
|
||||
--user=oidc
|
||||
```
|
||||
|
||||
As user `alice`, you should be able to use `kubectl` to create some namespaces:
|
||||
```
|
||||
$ kubectl --context alice-oidc@mycluster create namespace oil-production
|
||||
$ kubectl --context alice-oidc@mycluster create namespace oil-development
|
||||
$ kubectl --context alice-oidc@mycluster create namespace gas-marketing
|
||||
```
|
||||
|
||||
and list only those namespaces:
|
||||
```
|
||||
$ kubectl --context alice-oidc@mycluster get namespaces
|
||||
NAME STATUS AGE
|
||||
gas-marketing Active 2m
|
||||
oil-development Active 2m
|
||||
oil-production Active 2m
|
||||
```
|
||||
|
||||
When logged as cluster-admin power user you should be able to see all namespaces:
|
||||
```
|
||||
$ kubectl get namespaces
|
||||
NAME STATUS AGE
|
||||
default Active 78d
|
||||
kube-node-lease Active 78d
|
||||
kube-public Active 78d
|
||||
kube-system Active 78d
|
||||
gas-marketing Active 2m
|
||||
oil-development Active 2m
|
||||
oil-production Active 2m
|
||||
```
|
||||
|
||||
_Nota Bene_: once your `ID_TOKEN` expires, the `kubectl` OIDC Authenticator will attempt to refresh automatically your `ID_TOKEN` using the `REFRESH_TOKEN`, the `OIDC_CLIENT_ID` and the `OIDC_CLIENT_SECRET` storing the new values for the `REFRESH_TOKEN` and `ID_TOKEN` in your `kubeconfig` file. In case the OIDC uses a self signed CA certificate, make sure to specify it with the `idp-certificate-authority` option in your `kubeconfig` file, otherwise you'll not able to refresh the tokens. Once the `REFRESH_TOKEN` is expired, you will need to refresh tokens manually.
|
||||
|
||||
## RBAC Considerations
|
||||
Currently, the service account used for `capsule-proxy` needs to have `cluster-admin` permissions.
|
||||
|
||||
## Configuring client-only dashboards
|
||||
If you're using a client-only dashboard, for example [Lens](https://k8slens.dev/), the `capsule-proxy` can be used as in the previous `kubectl` example since Lens just needs for a `kubeconfig` file. Assuming to use a `kubeconfig` file containing a valid OIDC token released for the `alice` user, you can access the cluster with Lens dashboard and see only namespaces belonging to the Alice's tenants.
|
||||
|
||||
For web based dashboards, like the [Kubernetes Dashboard](https://github.com/kubernetes/dashboard), the `capsule-proxy` can be installed as sidecar container. See [Sidecar Installation](./sidecar.md).
|
||||
85
e2e/additional_role_bindings_test.go
Normal file
@@ -0,0 +1,85 @@
|
||||
//+build e2e
|
||||
|
||||
/*
|
||||
Copyright 2020 Clastix Labs.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
rbacv1 "k8s.io/api/rbac/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/clastix/capsule/api/v1alpha1"
|
||||
)
|
||||
|
||||
var _ = Describe("creating a Namespace with an additional Role Binding", func() {
|
||||
tnt := &v1alpha1.Tenant{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "additional-role-binding",
|
||||
},
|
||||
Spec: v1alpha1.TenantSpec{
|
||||
Owner: v1alpha1.OwnerSpec{
|
||||
Name: "dale",
|
||||
Kind: "User",
|
||||
},
|
||||
AdditionalRoleBindings: []v1alpha1.AdditionalRoleBindings{
|
||||
{
|
||||
ClusterRoleName: "crds-rolebinding",
|
||||
Subjects: []rbacv1.Subject{
|
||||
{
|
||||
Kind: "Group",
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Name: "system:authenticated",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
JustBeforeEach(func() {
|
||||
EventuallyCreation(func() error {
|
||||
tnt.ResourceVersion = ""
|
||||
return k8sClient.Create(context.TODO(), tnt)
|
||||
}).Should(Succeed())
|
||||
})
|
||||
JustAfterEach(func() {
|
||||
Expect(k8sClient.Delete(context.TODO(), tnt)).Should(Succeed())
|
||||
})
|
||||
|
||||
It("should be assigned to each Namespace", func() {
|
||||
for _, ns := range []string{"rb-1", "rb-2", "rb-3"} {
|
||||
ns := NewNamespace(ns)
|
||||
NamespaceCreation(ns, tnt, defaultTimeoutInterval).Should(Succeed())
|
||||
TenantNamespaceList(tnt, podRecreationTimeoutInterval).Should(ContainElement(ns.GetName()))
|
||||
|
||||
var rb *rbacv1.RoleBinding
|
||||
|
||||
Eventually(func() (err error) {
|
||||
cs := ownerClient(tnt)
|
||||
rb, err = cs.RbacV1().RoleBindings(ns.Name).Get(context.Background(), fmt.Sprintf("capsule-%s-%s", tnt.Name, "crds-rolebinding"), metav1.GetOptions{})
|
||||
return err
|
||||
}, defaultTimeoutInterval, defaultPollInterval).Should(Succeed())
|
||||
Expect(rb.RoleRef.Name).Should(Equal(tnt.Spec.AdditionalRoleBindings[0].ClusterRoleName))
|
||||
Expect(rb.Subjects).Should(Equal(tnt.Spec.AdditionalRoleBindings[0].Subjects))
|
||||
}
|
||||
})
|
||||
})
|
||||