Compare commits

...

17 Commits

Author SHA1 Message Date
Tom OBrien
0b199f4136 fix: modify jobs.image.tag for eks
EKS sometimes has a '+' in the Kubernetes minor version.
This results in an invalid image tag for jobs.
2022-01-18 16:26:24 +00:00
Dario Tranchitella
1bbaebbc90 build(installer): releasing to capsule v0.1.1 2022-01-11 09:35:29 +00:00
Dario Tranchitella
4b8d8b2a7c build(helm): aligning to capsule v0.1.1 2022-01-11 09:35:29 +00:00
Dario Tranchitella
3fb4c41daf docs: removing development environment setup for capsule-proxy 2022-01-11 08:21:16 +00:00
Dario Tranchitella
055791966a docs: aligning to capsule-proxy documentation 2022-01-11 08:21:16 +00:00
Dario Tranchitella
c9af9c18e4 chore(ci): e2e for kubernetes v1.23 2022-01-03 10:33:42 +00:00
Maksim Fedotov
fef381d2b4 feat(helm): add default conversion webhook configuration to tenant CRD 2021-12-30 08:31:13 +00:00
Max Fedotov
19aff8c882 fix: ignore NotFound error in ServiceLabelsReconciler (#494)
Co-authored-by: Maksim Fedotov <m_fedotov@wargaming.net>
2021-12-29 18:26:45 +02:00
Dario Tranchitella
8da7e22cb2 fix(docs): broken link for documentation static website 2021-12-29 16:07:37 +00:00
Dario Tranchitella
47c37a3d5d feat(docs): v1alpha1 to v1beta1 upgrade guide 2021-12-27 07:51:04 +00:00
Dario Tranchitella
677175b3ed fix(docs): referring to old capsule version 2021-12-27 07:51:04 +00:00
Dario Tranchitella
c95e3a2068 docs: restoring multi-tenancy benchmark results 2021-12-26 19:51:48 +00:00
Dario Tranchitella
0be3be4480 docs: limiting amount of resources deployed in a tenant 2021-12-23 11:39:34 +00:00
Dario Tranchitella
6ad434fcfb test(e2e): limiting amount of resources deployed in a tenant 2021-12-23 11:39:34 +00:00
Dario Tranchitella
e53911942d feat: limiting amount of resources deployed in a tenant 2021-12-23 11:39:34 +00:00
ptx96
a179645f26 feat(helm): find kubectl tag from server version 2021-12-22 09:33:27 +01:00
Dario Tranchitella
778fb4bcc2 fix: starting all controllers only when certificates are generated
This is going to solve the issue when upgrading Capsule <v0.1.0 to
>=v0.1.0: due to a resource reflector, many warnings were polluting the
reconciliation loop and causing unmarshaling errors.

Additionally, only the CA Secret was checked before starting the
Operator, although the TLS Secret is also required for the webhooks,
along with the `/convert` endpoint used for the CR version conversion.
2021-12-21 06:45:16 +00:00
27 changed files with 3149 additions and 165 deletions

View File

@@ -29,7 +29,7 @@ jobs:
name: Kubernetes
strategy:
matrix:
k8s-version: ['v1.16.15', 'v1.17.11', 'v1.18.8', 'v1.19.4', 'v1.20.7', 'v1.21.2', 'v1.22.0']
k8s-version: ['v1.16.15', 'v1.17.11', 'v1.18.8', 'v1.19.4', 'v1.20.7', 'v1.21.2', 'v1.22.4', 'v1.23.0']
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2

View File

@@ -59,7 +59,7 @@ Assign to tenants a dedicated set of compute, storage, and network resources and
# Documentation
Please, check the project [documentation](capsule.clastix.io) for the cool things you can do with Capsule.
Please, check the project [documentation](https://capsule.clastix.io) for the cool things you can do with Capsule.
# Contributions

View File

@@ -0,0 +1,47 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package v1beta1
import (
"fmt"
"strconv"
)
const (
ResourceQuotaAnnotationPrefix = "quota.resources.capsule.clastix.io"
ResourceUsedAnnotationPrefix = "used.resources.capsule.clastix.io"
)
func UsedAnnotationForResource(kindGroup string) string {
return fmt.Sprintf("%s/%s", ResourceUsedAnnotationPrefix, kindGroup)
}
func LimitAnnotationForResource(kindGroup string) string {
return fmt.Sprintf("%s/%s", ResourceQuotaAnnotationPrefix, kindGroup)
}
func GetUsedResourceFromTenant(tenant Tenant, kindGroup string) (int64, error) {
usedStr, ok := tenant.GetAnnotations()[UsedAnnotationForResource(kindGroup)]
if !ok {
usedStr = "0"
}
used, _ := strconv.ParseInt(usedStr, 10, 10)
return used, nil
}
func GetLimitResourceFromTenant(tenant Tenant, kindGroup string) (int64, error) {
limitStr, ok := tenant.GetAnnotations()[LimitAnnotationForResource(kindGroup)]
if !ok {
return 0, fmt.Errorf("resource %s is not limited for the current tenant", kindGroup)
}
limit, err := strconv.ParseInt(limitStr, 10, 10)
if err != nil {
return 0, fmt.Errorf("resource %s limit cannot be parsed, %w", kindGroup, err)
}
return limit, nil
}
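
For context, a minimal usage sketch of the new annotation helpers, assuming the hypothetical `mysqls.databases.acme.corp_v1` resource key used in the documentation changes further below:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)

func main() {
	// The key follows the ${PLURAL_NAME}.${API_GROUP}_${API_VERSION} pattern.
	kindGroup := "mysqls.databases.acme.corp_v1"

	tnt := capsulev1beta1.Tenant{
		ObjectMeta: metav1.ObjectMeta{
			Name: "oil",
			Annotations: map[string]string{
				// quota.resources.capsule.clastix.io/mysqls.databases.acme.corp_v1
				capsulev1beta1.LimitAnnotationForResource(kindGroup): "3",
				// used.resources.capsule.clastix.io/mysqls.databases.acme.corp_v1
				capsulev1beta1.UsedAnnotationForResource(kindGroup): "1",
			},
		},
	}

	limit, err := capsulev1beta1.GetLimitResourceFromTenant(tnt, kindGroup)
	if err != nil {
		panic(err) // the resource is not limited for this Tenant
	}

	used, _ := capsulev1beta1.GetUsedResourceFromTenant(tnt, kindGroup)
	fmt.Printf("used %d of %d %s\n", used, limit, kindGroup)
}
```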

View File

@@ -21,8 +21,8 @@ sources:
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.1.4
version: 0.1.6
# This is the version number of the application being deployed.
# This version number should be incremented each time you make changes to the application.
appVersion: 0.1.0
appVersion: 0.1.1

View File

@@ -7,7 +7,17 @@ metadata:
name: tenants.capsule.clastix.io
spec:
conversion:
strategy: None
strategy: Webhook
webhook:
clientConfig:
service:
name: capsule-webhook-service
namespace: capsule-system
path: /convert
port: 443
conversionReviewVersions:
- v1alpha1
- v1beta1
group: capsule.clastix.io
names:
kind: Tenant

View File

@@ -91,11 +91,26 @@ Create the proxy fully-qualified Docker image to use
{{- printf "%s:%s" .Values.proxy.image.repository .Values.proxy.image.tag -}}
{{- end }}
{{/*
Determine the Kubernetes version to use for jobsFullyQualifiedDockerImage tag
*/}}
{{- define "capsule.jobsTagKubeVersion" -}}
{{- if contains "-eks-" .Capabilities.KubeVersion.GitVersion }}
{{- print "v" .Capabilities.KubeVersion.Major "." (.Capabilities.KubeVersion.Minor | replace "+" "") -}}
{{- else }}
{{- print "v" .Capabilities.KubeVersion.Major "." .Capabilities.KubeVersion.Minor -}}
{{- end }}
{{- end }}
{{/*
Create the jobs fully-qualified Docker image to use
*/}}
{{- define "capsule.jobsFullyQualifiedDockerImage" -}}
{{- if .Values.jobs.image.tag }}
{{- printf "%s:%s" .Values.jobs.image.repository .Values.jobs.image.tag -}}
{{- else }}
{{- printf "%s:%s" .Values.jobs.image.repository (include "capsule.jobsTagKubeVersion" .) -}}
{{- end }}
{{- end }}
{{/*
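
For context, the helper above works around EKS reporting a `+` suffix in `.Capabilities.KubeVersion.Minor` (for example `21+`), which would otherwise yield an invalid image tag such as `v1.21+`. A minimal Go sketch of the same derivation, under those assumptions:

```go
package main

import (
	"fmt"
	"strings"
)

// jobsTagKubeVersion mirrors the Helm helper: on EKS the minor version can
// carry a trailing "+", which is stripped so the resulting jobs image tag
// stays a valid Docker tag.
func jobsTagKubeVersion(gitVersion, major, minor string) string {
	if strings.Contains(gitVersion, "-eks-") {
		minor = strings.ReplaceAll(minor, "+", "")
	}
	return fmt.Sprintf("v%s.%s", major, minor)
}

func main() {
	// Hypothetical EKS version data.
	fmt.Println(jobsTagKubeVersion("v1.21.2-eks-bc4871b", "1", "21+")) // v1.21
	// Vanilla upstream cluster.
	fmt.Println(jobsTagKubeVersion("v1.22.4", "1", "22")) // v1.22
}
```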

View File

@@ -41,7 +41,7 @@ jobs:
image:
repository: quay.io/clastix/kubectl
pullPolicy: IfNotPresent
tag: "v1.20.7"
tag: ""
imagePullSecrets: []
serviceAccount:
create: true

View File

@@ -1411,7 +1411,7 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
image: quay.io/clastix/capsule:v0.1.1-rc1
image: quay.io/clastix/capsule:v0.1.1
imagePullPolicy: IfNotPresent
name: manager
ports:

View File

@@ -7,4 +7,4 @@ kind: Kustomization
images:
- name: controller
newName: quay.io/clastix/capsule
newTag: v0.1.1-rc1
newTag: v0.1.1

View File

@@ -189,7 +189,7 @@ func (r CAReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctrl
tls := &corev1.Secret{}
err = r.Get(ctx, types.NamespacedName{
Namespace: r.Namespace,
Name: tlsSecretName,
Name: TLSSecretName,
}, tls)
if err != nil {
r.Log.Error(err, "Capsule TLS Secret missing")

View File

@@ -8,5 +8,5 @@ const (
privateKeySecretKey = "tls.key"
CASecretName = "capsule-ca"
tlsSecretName = "capsule-tls"
TLSSecretName = "capsule-tls"
)

View File

@@ -33,7 +33,7 @@ type TLSReconciler struct {
func (r *TLSReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&corev1.Secret{}, forOptionPerInstanceName(tlsSecretName)).
For(&corev1.Secret{}, forOptionPerInstanceName(TLSSecretName)).
Complete(r)
}
@@ -112,7 +112,7 @@ func (r TLSReconciler) Reconcile(ctx context.Context, request ctrl.Request) (ctr
return reconcile.Result{}, err
}
if instance.Name == tlsSecretName && res == controllerutil.OperationResultUpdated {
if instance.Name == TLSSecretName && res == controllerutil.OperationResultUpdated {
r.Log.Info("Capsule TLS certificates has been updated, Controller pods must be restarted to load new certificate")
hostname, _ := os.Hostname()

View File

@@ -9,6 +9,7 @@ import (
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
@@ -49,6 +50,9 @@ func (r *abstractServiceLabelsReconciler) Reconcile(ctx context.Context, request
err = r.client.Get(ctx, request.NamespacedName, r.obj)
if err != nil {
if errors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}

View File

@@ -9,6 +9,7 @@ import (
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
@@ -20,9 +21,10 @@ import (
type Manager struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
Recorder record.EventRecorder
Log logr.Logger
Scheme *runtime.Scheme
Recorder record.EventRecorder
RESTConfig *rest.Config
}
func (r *Manager) SetupWithManager(mgr ctrl.Manager) error {
@@ -55,6 +57,12 @@ func (r Manager) Reconcile(ctx context.Context, request ctrl.Request) (result ct
return
}
r.Log.Info("Ensuring limit resources count is updated")
if err = r.syncCustomResourceQuotaUsages(ctx, instance); err != nil {
r.Log.Error(err, "Cannot count limited resources")
return
}
// Ensuring all namespaces are collected
r.Log.Info("Ensuring all Namespaces are collected")
if err = r.collectNamespaces(instance); err != nil {

View File

@@ -0,0 +1,122 @@
package tenant
import (
"context"
"fmt"
"strings"
"golang.org/x/sync/errgroup"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/util/retry"
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)
func (r *Manager) syncCustomResourceQuotaUsages(ctx context.Context, tenant *capsulev1beta1.Tenant) error {
type resource struct {
kind string
group string
version string
}
var resourceList []resource
for k := range tenant.GetAnnotations() {
if !strings.HasPrefix(k, capsulev1beta1.ResourceQuotaAnnotationPrefix) {
continue
}
parts := strings.Split(k, "/")
if len(parts) != 2 {
r.Log.Info("non well-formed Resource Limit annotation", "key", k)
continue
}
parts = strings.Split(parts[1], "_")
if len(parts) != 2 {
r.Log.Info("non well-formed Resource Limit annotation, cannot retrieve version", "key", k)
continue
}
groupKindParts := strings.Split(parts[0], ".")
if len(groupKindParts) < 2 {
r.Log.Info("non well-formed Resource Limit annotation, cannot retrieve kind and group", "key", k)
continue
}
resourceList = append(resourceList, resource{
kind: groupKindParts[0],
group: strings.Join(groupKindParts[1:], "."),
version: parts[1],
})
}
errGroup := new(errgroup.Group)
usedMap := make(map[string]int)
defer func() {
for gvk, used := range usedMap {
err := retry.RetryOnConflict(retry.DefaultBackoff, func() (retryErr error) {
tnt := &capsulev1beta1.Tenant{}
if retryErr = r.Client.Get(ctx, types.NamespacedName{Name: tenant.GetName()}, tnt); retryErr != nil {
return
}
if tnt.GetAnnotations() == nil {
tnt.Annotations = make(map[string]string)
}
tnt.Annotations[capsulev1beta1.UsedAnnotationForResource(gvk)] = fmt.Sprintf("%d", used)
return r.Client.Update(ctx, tnt)
})
if err != nil {
r.Log.Error(err, "cannot update custom Resource Quota", "GVK", gvk)
}
}
}()
for _, item := range resourceList {
res := item
errGroup.Go(func() (scopeErr error) {
dynamicClient := dynamic.NewForConfigOrDie(r.RESTConfig)
for _, ns := range tenant.Status.Namespaces {
var list *unstructured.UnstructuredList
list, scopeErr = dynamicClient.Resource(schema.GroupVersionResource{Group: res.group, Version: res.version, Resource: res.kind}).List(ctx, metav1.ListOptions{
FieldSelector: fmt.Sprintf("metadata.namespace==%s", ns),
})
if scopeErr != nil {
return scopeErr
}
key := fmt.Sprintf("%s.%s_%s", res.kind, res.group, res.version)
if _, ok := usedMap[key]; !ok {
usedMap[key] = 0
}
usedMap[key] += len(list.Items)
}
return
})
}
if err := errGroup.Wait(); err != nil {
return err
}
return nil
}

Two binary image files added (not shown): 294 KiB and 283 KiB.

View File

@@ -338,57 +338,3 @@ Now it's time to work through our familiar inner loop for development in our pre
]
}
```
## Debug Capsule Proxy locally
This section helps new contributors to locally run and debug `capsule-proxy` in _out of cluster_ mode:
1. You need to run a kind cluster and find the endpoint port of `kind-control-plane` using `docker ps`:
```bash
docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
88432e392adb kindest/node:v1.20.2 "/usr/local/bin/entr…" 32 seconds ago Up 28 seconds 127.0.0.1:64582->6443/tcp kind-control-plane
```
2. You need to generate TLS cert keys for localhost, you can use [mkcert](https://github.com/FiloSottile/mkcert):
```bash
> cd /tmp
> mkcert localhost
> ls
localhost-key.pem localhost.pem
```
3. Run the proxy with the following options
```bash
go run main.go \
--ssl-cert-path=/tmp/localhost.pem \
--ssl-key-path=/tmp/localhost-key.pem \
--enable-ssl=true \
--kubeconfig=<YOUR KUBERNETES CONFIGURATION FILE>
```
5. Edit the `KUBECONFIG` file (you should make a copy and work on it) as follows:
- Find the section of your cluster
- replace the server path with `https://127.0.0.1:9001`
- replace the certificate-authority-data path with the content of your rootCA.pem file. (if you use mkcert, you'll find it with `cat "$(mkcert -CAROOT)/rootCA.pem"|base64|tr -d '\n'`)
6. Now you should be able to run kubectl using the proxy!
## Debug Capsule Proxy remotely
In some cases, you would need to debug the in-cluster mode and [`delve`](https://github.com/go-delve/delve) plays a big role here.
1. build the Docker image with `delve` issuing `make dlv-build`
2. with the `quay.io/clastix/capsule-proxy:dlv` produced Docker image, publish it or load it to your [KinD](https://github.com/kubernetes-sigs/kind) instance (`kind load docker-image --name capsule --nodes capsule-control-plane quay.io/clastix/capsule-proxy:dlv`)
3. change the Deployment image using `kubectl edit` or `kubectl set image deployment/capsule-proxy capsule-proxy=quay.io/clastix/capsule-proxy:dlv`
4. wait for the image rollout (`kubectl -n capsule-system rollout status deployment/capsule-proxy`)
5. perform the port-forwarding with `kubectl -n capsule-system port-forward $(kubectl -n capsule-system get pods -l app.kubernetes.io/name=capsule-proxy --output name) 2345:2345`
6. connect using your `delve` options
> _Nota Bene_: the application could be killed by the Liveness Probe since delve will wait for the debugger connection before starting it.
> Feel free to edit and remove the probes to avoid this kind of issue.
Please refer to [contributing](/docs/contributing) for more details while contributing.

docs/content/general/mtb.md: new file (2284 lines added); diff suppressed because it is too large.

View File

@@ -2,7 +2,7 @@
Capsule Proxy is an add-on for Capsule Operator addressing some RBAC issues when enabling multi-tenancy in Kubernetes since users cannot list the owned cluster-scoped resources.
For example:
Kubernetes RBAC cannot list only the owned cluster-scoped resources since there are no ACL-filtered APIs. For example:
```
$ kubectl get namespaces
@@ -29,21 +29,26 @@ The `capsule-proxy` implements a simple reverse proxy that intercepts only speci
Current implementation filters the following requests:
* `api/v1/namespaces`
* `api/v1/nodes`
* `apis/storage.k8s.io/v1/storageclasses{/name}`
* `apis/networking.k8s.io/{v1,v1beta1}/ingressclasses{/name}`
* `api/scheduling.k8s.io/{v1}/priorityclasses{/name}`
* `/api/scheduling.k8s.io/{v1}/priorityclasses{/name}`
* `/api/v1/namespaces`
* `/api/v1/nodes{/name}`
* `/api/v1/pods?fieldSelector=spec.nodeName%3D{name}`
* `/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/{name}`
* `/apis/metrics.k8s.io/{v1beta1}/nodes{/name}`
* `/apis/networking.k8s.io/{v1,v1beta1}/ingressclasses{/name}`
* `/apis/storage.k8s.io/v1/storageclasses{/name}`
All other requestes are proxied transparently to the APIs server, so no side-effects are expected. We're planning to add new APIs in the future, so PRs are welcome!
All other requests are proxied transparently to the APIs server, so no side effects are expected. We're planning to add new APIs in the future, so [PRs are welcome](https://github.com/clastix/capsule-proxy)!
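For illustration only, a minimal client-go sketch that lists Namespaces through the proxy rather than the APIs server directly; the endpoint, token, and TLS settings are placeholder assumptions:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Point the client at capsule-proxy instead of the APIs server; only the
	// routes listed above are filtered, everything else is proxied transparently.
	cfg := &rest.Config{
		Host:        "https://capsule-proxy.example.com:9001", // hypothetical endpoint
		BearerToken: "<tenant owner OIDC or ServiceAccount token>",
		// For brevity only: real deployments should provide the proxy CA instead.
		TLSClientConfig: rest.TLSClientConfig{Insecure: true},
	}

	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// The proxy returns only the Namespaces owned by the tenant.
	namespaces, err := clientset.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}

	for _, ns := range namespaces.Items {
		fmt.Println(ns.GetName())
	}
}
```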
## Installation
Capsule Proxy is an optional add-on of the main Capsule Operator, so make sure you have a working instance of Capsule before attempting to install it. Use the `capsule-proxy` only if you want Tenant Owners to list their own Cluster-Scope resources.
Capsule Proxy is an optional add-on of the main Capsule Operator, so make sure you have a working instance of Capsule before attempting to install it.
Use the `capsule-proxy` only if you want Tenant Owners to list their own Cluster-Scope resources.
The `capsule-proxy` can be deployed in standalone mode, e.g. running as a pod bridging any Kubernetes client to the APIs server. Optionally, it can be deployed as a sidecar container in the backend of a dashboard.
The `capsule-proxy` can be deployed in standalone mode, e.g. running as a pod bridging any Kubernetes client to the APIs server.
Optionally, it can be deployed as a sidecar container in the backend of a dashboard.
Running outside of a Kubernetes cluster is also viable, although a valid `KUBECONFIG` file must be provided, using the environment variable `KUBECONFIG` or the default file in `$HOME/.kube/config`.
Running outside a Kubernetes cluster is also viable, although a valid `KUBECONFIG` file must be provided, using the environment variable `KUBECONFIG` or the default file in `$HOME/.kube/config`.
A Helm Chart is available [here](https://github.com/clastix/capsule/blob/master/charts/capsule/README.md).
@@ -64,6 +69,18 @@ Here how it looks like when exposed through an Ingress Controller:
ingress-controller capsule-proxy kube-apiserver
```
## CLI flags
- `capsule-configuration-name`: name of the `CapsuleConfiguration` resource which contains the [Capsule configurations](/docs/general/references/#capsule-configuration) (default: `default`)
- `capsule-user-group` (deprecated): old way to specify the user groups whose requests must be intercepted by the proxy
- `ignored-user-group`: names of the groups whose requests must be ignored and proxy-passed to the upstream server
- `listening-port`: HTTP port the proxy listens to (default: `9001`)
- `oidc-username-claim`: the OIDC field name used to identify the user (default: `preferred_username`); the proper value can be extracted from the Kubernetes API Server flags
- `enable-ssl`: enable the bind on HTTPS for secure communication, allowing client certificate-based authentication, also known as mutual TLS (default: `true`)
- `ssl-cert-path`: path to the TLS certificate, when TLS mode is enabled (default: `/opt/capsule-proxy/tls.crt`)
- `ssl-key-path`: path to the TLS certificate key, when TLS mode is enabled (default: `/opt/capsule-proxy/tls.key`)
- `rolebindings-resync-period`: resync period for the RoleBinding resources reflector, lower values can help if you're facing [flaky etcd connection](https://github.com/clastix/capsule-proxy/issues/174) (default: `10h`)
## User Authentication
The `capsule-proxy` intercepts all the requests from the `kubectl` client directed to the APIs Server. Users relying on TLS client-based authentication with a certificate and key are able to talk with the APIs Server since the proxy is able to forward client certificates to the Kubernetes APIs Server.
@@ -72,6 +89,16 @@ It is possible to protect the `capsule-proxy` using a certificate provided by Le
If your prerequisite is exposing `capsule-proxy` using an Ingress, you must rely on the token-based authentication, for example OIDC or Bearer tokens. Users providing tokens are always able to reach the APIs Server.
## Kubernetes dashboards integration
If you're using a client-only dashboard, for example [Lens](https://k8slens.dev/), the `capsule-proxy` can be used as with `kubectl` since this dashboard usually talks to the APIs server using just a `kubeconfig` file.
![Lens dashboard](../assets/proxy-lens.png)
For a web-based dashboard, like the [Kubernetes Dashboard](https://github.com/kubernetes/dashboard), the `capsule-proxy` can be deployed as a sidecar container in the backend, following the well-known cloud-native _Ambassador Pattern_.
![Kubernetes dashboard](../assets/proxy-kubernetes-dashboard.png)
## Tenant Owner Authorization
Each Tenant owner can have their capabilities managed pretty similarly to standard Kubernetes RBAC.
@@ -107,6 +134,7 @@ Each Resource kind can be granted with several verbs, such as:
### Namespaces
As tenant owner `alice`, you can use `kubectl` to create some namespaces:
```
$ kubectl --context alice-oidc@mycluster create namespace oil-production
$ kubectl --context alice-oidc@mycluster create namespace oil-development
@@ -125,6 +153,44 @@ oil-production Active 2m
### Nodes
The Capsule Proxy gives the owners the ability to access the nodes matching the `.spec.nodeSelector` in the Tenant manifest:
```yaml
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
name: oil
spec:
owners:
- kind: User
name: alice
proxySettings:
- kind: Nodes
operations:
- List
nodeSelector:
kubernetes.io/hostname: capsule-gold-qwerty
```
```bash
$ kubectl --context alice-oidc@mycluster get nodes
NAME STATUS ROLES AGE VERSION
capsule-gold-qwerty Ready <none> 43h v1.19.1
```
> Warning: when no `nodeSelector` is specified, the tenant owners have access to all the nodes, according to the permissions listed in the `proxySettings` specs.
### Special routes for kubectl describe
When issuing a `kubectl describe node`, some other endpoints are put in place:
* `api/v1/pods?fieldSelector=spec.nodeName%3D{name}`
* `/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/{name}`
These are mandatory in order to retrieve the list of the running Pods on the required node, and to provide information about its lease status.
@@ -345,4 +411,10 @@ $ TOKEN=<type your TOKEN>
$ curl -H "Authorization: Bearer $TOKEN" http://localhost:9001/api/v1/namespaces
```
> NOTE: `kubectl` will not work against a `http` server.
## Contributing
`capsule-proxy` is open-source software released under the Apache2 [license](https://github.com/clastix/capsule-proxy/blob/master/LICENSE).
Contributing guidelines are available [here](https://github.com/clastix/capsule-proxy/blob/master/CONTRIBUTING.md).

View File

@@ -586,7 +586,7 @@ By setting enforcement at the namespace level, i.e. `spec.resourceQuotas.scope=N
Bill, the cluster admin, can also set Limit Ranges for each namespace in Alice's tenant by defining limits for pods and containers in the tenant spec:
```yaml
apiVersion: capsule.clastix.io/v1alpha1
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
name: oil
@@ -1312,6 +1312,58 @@ With the above example, Capsule is leaving the tenant owner to create namespaced
> Take Note: a tenant owner having the admin scope on its namespaces only does not have the permission to create Custom Resource Definitions (CRDs) because this requires a cluster admin permission level. Only Bill, the cluster admin, can create CRDs. This is a known limitation of any multi-tenancy environment based on a single shared control plane.
## Assign custom resources quota
Kubernetes offers by default `ResourceQuota` resources, aimed to limit the number of basic primitives in a Namespace.
Capsule already provides the sharing of these constraints across the Tenant Namespaces; however, limiting the amount of namespaced Custom Resource instances is not supported upstream.
Starting from Capsule **v0.1.1**, this can be done using a special annotation in the Tenant manifest.
Imagine the case where the usage of a Custom Resource named `MySQL` in the API group `databases.acme.corp/v1` must be limited in the Tenant `oil`: this can be done as follows.
```yaml
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
name: oil
annotations:
quota.resources.capsule.clastix.io/mysqls.databases.acme.corp_v1: "3"
spec:
additionalRoleBindings:
- clusterRoleName: mysql-namespace-admin
subjects:
- kind: User
name: alice
owners:
- name: alice
kind: User
```
> The Additional Role Binding referring to the Cluster Role `mysql-namespace-admin` is required to let Alice manage their Custom Resource instances.
> The pattern for the `quota.resources.capsule.clastix.io` annotation is the following:
> `quota.resources.capsule.clastix.io/${PLURAL_NAME}.${API_GROUP}_${API_VERSION}`
>
> You can figure out the required fields using `kubectl api-resources`.
When `alice` creates a `MySQL` instance in one of their Tenant Namespaces, the Cluster Administrator can easily retrieve the overall usage.
```yaml
apiVersion: capsule.clastix.io/v1beta1
kind: Tenant
metadata:
name: oil
annotations:
quota.resources.capsule.clastix.io/mysqls.databases.acme.corp_v1: "3"
used.resources.capsule.clastix.io/mysqls.databases.acme.corp_v1: "1"
spec:
owners:
- name: alice
kind: User
```
> This feature is still in an alpha stage and requires a high amount of computing resources due to the dynamic client requests.
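A small worked example of the annotation key pattern, assuming the hypothetical `MySQL` CRD above and the plural name, API group, and version reported by `kubectl api-resources`:

```go
package main

import "fmt"

func main() {
	// Hypothetical CRD from the example above: plural "mysqls",
	// group "databases.acme.corp", version "v1".
	plural, group, version := "mysqls", "databases.acme.corp", "v1"

	// ${PLURAL_NAME}.${API_GROUP}_${API_VERSION}
	limitKey := fmt.Sprintf("quota.resources.capsule.clastix.io/%s.%s_%s", plural, group, version)
	usedKey := fmt.Sprintf("used.resources.capsule.clastix.io/%s.%s_%s", plural, group, version)

	fmt.Println(limitKey) // quota.resources.capsule.clastix.io/mysqls.databases.acme.corp_v1
	fmt.Println(usedKey)  // used.resources.capsule.clastix.io/mysqls.databases.acme.corp_v1
}
```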
## Taint namespaces
With Capsule, Bill can _"taint"_ the namespaces created by Alice with additional labels and/or annotations. There is no specific semantic assigned to these labels and annotations: they will just be assigned to the namespaces in the tenant as they are created by Alice. This can help the cluster admin to implement specific use cases, such as providing backup as a service for namespaces in the tenant.
@@ -1564,4 +1616,8 @@ EOF
>* v1.20.6
>* v1.21.0
This ends our tutorial on how to implement complex multi-tenancy and policy-driven scenarios with Capsule. As we improve it, more use cases about multi-tenancy, policy admission control, and cluster governance will be covered in the future. Stay tuned!
---
This ends our tutorial on how to implement complex multi-tenancy and policy-driven scenarios with Capsule. As we improve it, more use cases about multi-tenancy, policy admission control, and cluster governance will be covered in the future.
Stay tuned!

View File

@@ -0,0 +1,73 @@
# Upgrading Tenant resource from v1alpha1 to v1beta1 version
With [Capsule v0.1.0](https://github.com/clastix/capsule/releases/tag/v0.1.0), the Tenant custom resource has been bumped to `v1beta1` from `v1alpha1` with additional fields addressing the new features implemented so far.
This document aims to provide support and a guide on how to perform a clean upgrade to the latest API version in order to avoid service disruption and data loss.
## Backup your cluster
We strongly suggest performing a full backup of your Kubernetes cluster, such as storage and etcd.
Use your favorite tool according to your needs.
## Uninstall the old Capsule release
If you're using Helm as package manager, all the Operator resources such as the Deployment, Service, Role Binding, etc. must be deleted.
```
helm uninstall -n capsule-system capsule
```
Ensure that everything has been removed correctly, especially the Secret resources.
## Patch the Tenant custom resource definition
Helm doesn't manage the lifecycle of Custom Resource Definitions; additional details can be found [here](https://github.com/helm/community/blob/f9e06c16d89ccea1bea77c01a6a96ae3b309f823/architecture/crds.md).
This process must be executed manually as follows:
```
kubectl apply -f https://raw.githubusercontent.com/clastix/capsule/v0.1.0/config/crd/bases/capsule.clastix.io_tenants.yaml
```
> Please note the Capsule version in the said URL; your mileage may vary according to the desired upgrade version.
## Install the Capsule operator using Helm
Since the Tenant custom resource definition has been patched with new fields, we can reinstall Capsule using the provided Helm chart.
```
helm upgrade --install capsule clastix/capsule -n capsule-system --create-namespace
```
This will start the Operator that will perform several required actions, such as:
1. Generating a new CA
2. Generating new TLS certificates for the local webhook server
3. Patching the Validating and Mutating Webhook Configuration resources with the fresh new CA
4. Patching the Custom Resource Definition tenant conversion webhook CA
## Ensure the conversion webhook is working
Kubernetes Custom Resource definitions provide a conversion webhook that is used by an Operator to perform seamless conversion between resources with different versioning.
With the fresh new installation, Capsule patched all the required moving parts to ensure this conversion is in place, using the latest version (currently, `v1beta1`) to present the Tenant resources.
You can check this behavior by issuing the following command:
```
$: kubectl get tenants.v1beta1.capsule.clastix.io
NAME NAMESPACE QUOTA NAMESPACE COUNT OWNER NAME OWNER KIND NODE SELECTOR AGE
oil 3 0 alice User {"kubernetes.io/os":"linux"} 3m43s
```
You should see all the previous Tenant resources converted in the new format and structure.
```
$: kubectl get tenants.v1beta1.capsule.clastix.io
NAME STATE NAMESPACE QUOTA NAMESPACE COUNT NODE SELECTOR AGE
oil Active 3 0 {"kubernetes.io/os":"linux"} 3m38s
```
> Resources are still persisted in etcd using the `v1alpha1` specification and the conversion is executed on-the-fly thanks to the conversion webhook.
> If you'd like to decrease the pressure on Capsule due to the conversion webhook, we suggest performing a resource patching using the command `kubectl replace`:
> in this way, the API Server will update the etcd key with the specification according to the new versioning, allowing the conversion to be skipped.

View File

@@ -37,6 +37,10 @@ module.exports = function (api) {
label: 'References',
path: '/docs/general/references'
},
{
label: 'Multi-Tenant Benchmark',
path: '/docs/general/mtb'
},
{
label: 'Capsule Proxy',
path: '/docs/general/proxy'
@@ -62,6 +66,10 @@ module.exports = function (api) {
label: 'Backup & Restore with Velero',
path: '/docs/guides/velero'
},
{
label: 'Upgrading Tenant version',
path: '/docs/guides/upgrading'
},
{
title: 'Managed Kubernetes',
subItems: [

View File

@@ -0,0 +1,156 @@
//go:build e2e
// +build e2e
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package e2e
import (
"context"
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes/scheme"
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
)
var _ = Describe("when Tenant limits custom Resource Quota", func() {
tnt := &capsulev1beta1.Tenant{
ObjectMeta: metav1.ObjectMeta{
Name: "limiting-resources",
Annotations: map[string]string{
"quota.resources.capsule.clastix.io/foos.test.clastix.io_v1": "3",
},
},
Spec: capsulev1beta1.TenantSpec{
Owners: capsulev1beta1.OwnerListSpec{
{
Name: "resource",
Kind: "User",
},
},
},
}
crd := &v1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: "foos.test.clastix.io",
},
Spec: v1.CustomResourceDefinitionSpec{
Group: "test.clastix.io",
Names: v1.CustomResourceDefinitionNames{
Kind: "Foo",
ListKind: "FooList",
Plural: "foos",
Singular: "foo",
},
Scope: v1.NamespaceScoped,
Versions: []v1.CustomResourceDefinitionVersion{
{
Name: "v1",
Served: true,
Storage: true,
Schema: &v1.CustomResourceValidation{
OpenAPIV3Schema: &v1.JSONSchemaProps{
Type: "object",
Properties: map[string]v1.JSONSchemaProps{
"apiVersion": {
Type: "string",
},
"kind": {
Type: "string",
},
"metadata": {
Type: "object",
},
},
},
},
},
},
},
}
JustBeforeEach(func() {
utilruntime.Must(v1.AddToScheme(scheme.Scheme))
EventuallyCreation(func() error {
return k8sClient.Create(context.TODO(), crd)
}).Should(Succeed())
EventuallyCreation(func() error {
return k8sClient.Create(context.TODO(), tnt)
}).Should(Succeed())
})
JustAfterEach(func() {
Expect(k8sClient.Delete(context.TODO(), crd)).Should(Succeed())
Expect(k8sClient.Delete(context.TODO(), tnt)).Should(Succeed())
})
It("should block resources in overflow", func() {
dynamicClient := dynamic.NewForConfigOrDie(cfg)
for _, i := range []int{1, 2, 3} {
ns := NewNamespace(fmt.Sprintf("resource-ns-%d", i))
NamespaceCreation(ns, tnt.Spec.Owners[0], defaultTimeoutInterval).Should(Succeed())
TenantNamespaceList(tnt, defaultTimeoutInterval).Should(ContainElement(ns.GetName()))
obj := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": fmt.Sprintf("%s/%s", crd.Spec.Group, crd.Spec.Versions[0].Name),
"kind": crd.Spec.Names.Kind,
"metadata": map[string]interface{}{
"name": fmt.Sprintf("resource-%d", i),
},
},
}
EventuallyCreation(func() (err error) {
_, err = dynamicClient.Resource(schema.GroupVersionResource{Group: crd.Spec.Group, Version: crd.Spec.Versions[0].Name, Resource: crd.Spec.Names.Plural}).Namespace(ns.GetName()).Create(context.Background(), obj, metav1.CreateOptions{})
return
}).ShouldNot(HaveOccurred())
}
for _, i := range []int{1, 2, 3} {
ns := NewNamespace(fmt.Sprintf("resource-ns-%d", i))
obj := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": fmt.Sprintf("%s/%s", crd.Spec.Group, crd.Spec.Versions[0].Name),
"kind": crd.Spec.Names.Kind,
"metadata": map[string]interface{}{
"name": fmt.Sprintf("fail-%d", i),
},
},
}
EventuallyCreation(func() (err error) {
_, err = dynamicClient.Resource(schema.GroupVersionResource{Group: crd.Spec.Group, Version: crd.Spec.Versions[0].Name, Resource: crd.Spec.Names.Plural}).Namespace(ns.GetName()).Create(context.Background(), obj, metav1.CreateOptions{})
return
}).Should(HaveOccurred())
}
Expect(k8sClient.Get(context.Background(), types.NamespacedName{Name: tnt.GetName()}, tnt)).ShouldNot(HaveOccurred())
Eventually(func() bool {
limit, _ := HaveKeyWithValue("quota.resources.capsule.clastix.io/foos.test.clastix.io_v1", "3").Match(tnt.GetAnnotations())
used, _ := HaveKeyWithValue("used.resources.capsule.clastix.io/foos.test.clastix.io_v1", "3").Match(tnt.GetAnnotations())
return limit && used
}, defaultTimeoutInterval, defaultPollInterval).Should(BeTrue())
})
})

main.go (176 changed lines)
View File

@@ -15,6 +15,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
utilVersion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/client-go/kubernetes"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
@@ -22,8 +23,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/clastix/capsule/pkg/webhook/node"
capsulev1alpha1 "github.com/clastix/capsule/api/v1alpha1"
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
configcontroller "github.com/clastix/capsule/controllers/config"
@@ -37,6 +36,7 @@ import (
"github.com/clastix/capsule/pkg/webhook/ingress"
namespacewebhook "github.com/clastix/capsule/pkg/webhook/namespace"
"github.com/clastix/capsule/pkg/webhook/networkpolicy"
"github.com/clastix/capsule/pkg/webhook/node"
"github.com/clastix/capsule/pkg/webhook/ownerreference"
"github.com/clastix/capsule/pkg/webhook/pod"
"github.com/clastix/capsule/pkg/webhook/pvc"
@@ -44,7 +44,6 @@ import (
"github.com/clastix/capsule/pkg/webhook/service"
"github.com/clastix/capsule/pkg/webhook/tenant"
"github.com/clastix/capsule/pkg/webhook/utils"
// +kubebuilder:scaffold:imports
)
var (
@@ -123,12 +122,6 @@ func main() {
os.Exit(1)
}
kubeVersion, err := utils.GetK8sVersion()
if err != nil {
setupLog.Error(err, "unable to get kubernetes version")
os.Exit(1)
}
_ = manager.AddReadyzCheck("ping", healthz.Ping)
_ = manager.AddHealthzCheck("ping", healthz.Ping)
@@ -154,75 +147,6 @@ func main() {
os.Exit(1)
}
// +kubebuilder:scaffold:builder
cfg := configuration.NewCapsuleConfiguration(manager.GetClient(), configurationName)
// webhooks: the order matters, don't change it and just append
webhooksList := append(
make([]webhook.Webhook, 0),
route.Pod(pod.ImagePullPolicy(), pod.ContainerRegistry(), pod.PriorityClass()),
route.Namespace(utils.InCapsuleGroups(cfg, namespacewebhook.QuotaHandler(), namespacewebhook.FreezeHandler(cfg), namespacewebhook.PrefixHandler(cfg), namespacewebhook.UserMetadataHandler())),
route.Ingress(ingress.Class(cfg), ingress.Hostnames(cfg), ingress.Collision(cfg), ingress.Wildcard()),
route.PVC(pvc.Handler()),
route.Service(service.Handler()),
route.NetworkPolicy(utils.InCapsuleGroups(cfg, networkpolicy.Handler())),
route.Tenant(tenant.NameHandler(), tenant.RoleBindingRegexHandler(), tenant.IngressClassRegexHandler(), tenant.StorageClassRegexHandler(), tenant.ContainerRegistryRegexHandler(), tenant.HostnameRegexHandler(), tenant.FreezedEmitter(), tenant.ServiceAccountNameHandler()),
route.OwnerReference(utils.InCapsuleGroups(cfg, ownerreference.Handler(cfg))),
route.Cordoning(tenant.CordoningHandler(cfg)),
route.Node(utils.InCapsuleGroups(cfg, node.UserMetadataHandler(cfg, kubeVersion))),
)
nodeWebhookSupported, _ := utils.NodeWebhookSupported(kubeVersion)
if !nodeWebhookSupported {
setupLog.Info("Disabling node labels verification webhook as current Kubernetes version doesn't have fix for CVE-2021-25735")
}
if err = webhook.Register(manager, webhooksList...); err != nil {
setupLog.Error(err, "unable to setup webhooks")
os.Exit(1)
}
rbacManager := &rbaccontroller.Manager{
Log: ctrl.Log.WithName("controllers").WithName("Rbac"),
Configuration: cfg,
}
if err = manager.Add(rbacManager); err != nil {
setupLog.Error(err, "unable to create cluster roles")
os.Exit(1)
}
if err = rbacManager.SetupWithManager(manager, configurationName); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Rbac")
os.Exit(1)
}
if err = (&servicelabelscontroller.ServicesLabelsReconciler{
Log: ctrl.Log.WithName("controllers").WithName("ServiceLabels"),
}).SetupWithManager(manager); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ServiceLabels")
os.Exit(1)
}
if err = (&servicelabelscontroller.EndpointsLabelsReconciler{
Log: ctrl.Log.WithName("controllers").WithName("EndpointLabels"),
}).SetupWithManager(manager); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "EndpointLabels")
os.Exit(1)
}
if err = (&servicelabelscontroller.EndpointSlicesLabelsReconciler{
Log: ctrl.Log.WithName("controllers").WithName("EndpointSliceLabels"),
VersionMinor: kubeVersion.Minor(),
VersionMajor: kubeVersion.Major(),
}).SetupWithManager(manager); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "EndpointSliceLabels")
}
if err = (&configcontroller.Manager{
Log: ctrl.Log.WithName("controllers").WithName("CapsuleConfiguration"),
}).SetupWithManager(manager, configurationName); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "CapsuleConfiguration")
os.Exit(1)
}
clientset, err := kubernetes.NewForConfig(ctrl.GetConfigOrDie())
if err != nil {
setupLog.Error(err, "unable to create kubernetes clientset")
@@ -235,16 +159,24 @@ func main() {
os.Exit(1)
}
if len(ca.Data) > 0 {
tls, err := clientset.CoreV1().Secrets(namespace).Get(ctx, secretcontroller.TLSSecretName, metav1.GetOptions{})
if err != nil {
setupLog.Error(err, "unable to get Capsule TLS secret")
os.Exit(1)
}
if len(ca.Data) > 0 && len(tls.Data) > 0 {
if err = (&tenantcontroller.Manager{
Client: manager.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("Tenant"),
Scheme: manager.GetScheme(),
Recorder: manager.GetEventRecorderFor("tenant-controller"),
RESTConfig: manager.GetConfig(),
Client: manager.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("Tenant"),
Scheme: manager.GetScheme(),
Recorder: manager.GetEventRecorderFor("tenant-controller"),
}).SetupWithManager(manager); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Tenant")
os.Exit(1)
}
if err = (&capsulev1alpha1.Tenant{}).SetupWebhookWithManager(manager); err != nil {
setupLog.Error(err, "unable to create conversion webhook", "webhook", "Tenant")
os.Exit(1)
@@ -254,6 +186,84 @@ func main() {
setupLog.Error(err, "unable to setup indexers")
os.Exit(1)
}
var kubeVersion *utilVersion.Version
kubeVersion, err = utils.GetK8sVersion()
if err != nil {
setupLog.Error(err, "unable to get kubernetes version")
os.Exit(1)
}
cfg := configuration.NewCapsuleConfiguration(manager.GetClient(), configurationName)
// webhooks: the order matters, don't change it and just append
webhooksList := append(
make([]webhook.Webhook, 0),
route.Pod(pod.ImagePullPolicy(), pod.ContainerRegistry(), pod.PriorityClass()),
route.Namespace(utils.InCapsuleGroups(cfg, namespacewebhook.QuotaHandler(), namespacewebhook.FreezeHandler(cfg), namespacewebhook.PrefixHandler(cfg), namespacewebhook.UserMetadataHandler())),
route.Ingress(ingress.Class(cfg), ingress.Hostnames(cfg), ingress.Collision(cfg), ingress.Wildcard()),
route.PVC(pvc.Handler()),
route.Service(service.Handler()),
route.NetworkPolicy(utils.InCapsuleGroups(cfg, networkpolicy.Handler())),
route.Tenant(tenant.NameHandler(), tenant.RoleBindingRegexHandler(), tenant.IngressClassRegexHandler(), tenant.StorageClassRegexHandler(), tenant.ContainerRegistryRegexHandler(), tenant.HostnameRegexHandler(), tenant.FreezedEmitter(), tenant.ServiceAccountNameHandler()),
route.OwnerReference(utils.InCapsuleGroups(cfg, ownerreference.Handler(cfg))),
route.Cordoning(tenant.CordoningHandler(cfg), tenant.ResourceCounterHandler()),
route.Node(utils.InCapsuleGroups(cfg, node.UserMetadataHandler(cfg, kubeVersion))),
)
nodeWebhookSupported, _ := utils.NodeWebhookSupported(kubeVersion)
if !nodeWebhookSupported {
setupLog.Info("Disabling node labels verification webhook as current Kubernetes version doesn't have fix for CVE-2021-25735")
}
if err = webhook.Register(manager, webhooksList...); err != nil {
setupLog.Error(err, "unable to setup webhooks")
os.Exit(1)
}
rbacManager := &rbaccontroller.Manager{
Log: ctrl.Log.WithName("controllers").WithName("Rbac"),
Configuration: cfg,
}
if err = manager.Add(rbacManager); err != nil {
setupLog.Error(err, "unable to create cluster roles")
os.Exit(1)
}
if err = rbacManager.SetupWithManager(manager, configurationName); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Rbac")
os.Exit(1)
}
if err = (&servicelabelscontroller.ServicesLabelsReconciler{
Log: ctrl.Log.WithName("controllers").WithName("ServiceLabels"),
}).SetupWithManager(manager); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ServiceLabels")
os.Exit(1)
}
if err = (&servicelabelscontroller.EndpointsLabelsReconciler{
Log: ctrl.Log.WithName("controllers").WithName("EndpointLabels"),
}).SetupWithManager(manager); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "EndpointLabels")
os.Exit(1)
}
if err = (&servicelabelscontroller.EndpointSlicesLabelsReconciler{
Log: ctrl.Log.WithName("controllers").WithName("EndpointSliceLabels"),
VersionMinor: kubeVersion.Minor(),
VersionMajor: kubeVersion.Major(),
}).SetupWithManager(manager); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "EndpointSliceLabels")
}
if err = (&configcontroller.Manager{
Log: ctrl.Log.WithName("controllers").WithName("CapsuleConfiguration"),
}).SetupWithManager(manager, configurationName); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "CapsuleConfiguration")
os.Exit(1)
}
} else {
setupLog.Info("skip registering a tenant controller, missing CA secret")
}

View File

@@ -0,0 +1,151 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package tenant
import (
"context"
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
capsulev1beta1 "github.com/clastix/capsule/api/v1beta1"
capsulewebhook "github.com/clastix/capsule/pkg/webhook"
"github.com/clastix/capsule/pkg/webhook/utils"
)
type resourceCounterHandler struct {
client client.Client
}
func (r *resourceCounterHandler) InjectClient(c client.Client) error {
r.client = c
return nil
}
func ResourceCounterHandler() capsulewebhook.Handler {
return &resourceCounterHandler{}
}
func (r *resourceCounterHandler) getTenantName(ctx context.Context, clt client.Client, req admission.Request) (string, error) {
tntList := &capsulev1beta1.TenantList{}
if err := clt.List(ctx, tntList, client.MatchingFieldsSelector{
Selector: fields.OneTermEqualSelector(".status.namespaces", req.Namespace),
}); err != nil {
return "", err
}
if len(tntList.Items) == 0 {
return "", nil
}
return tntList.Items[0].GetName(), nil
}
func (r *resourceCounterHandler) OnCreate(clt client.Client, decoder *admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
var tntName string
var err error
if tntName, err = r.getTenantName(ctx, clt, req); err != nil {
return utils.ErroredResponse(err)
}
if len(tntName) == 0 {
return nil
}
kgv := fmt.Sprintf("%s.%s_%s", req.Resource.Resource, req.Resource.Group, req.Resource.Version)
tnt := &capsulev1beta1.Tenant{}
var limit int64
err = retry.RetryOnConflict(retry.DefaultRetry, func() (retryErr error) {
if retryErr = clt.Get(ctx, types.NamespacedName{Name: tntName}, tnt); err != nil {
return retryErr
}
if limit, retryErr = capsulev1beta1.GetLimitResourceFromTenant(*tnt, kgv); retryErr != nil {
return nil
}
used, _ := capsulev1beta1.GetUsedResourceFromTenant(*tnt, kgv)
if used >= limit {
return NewCustomResourceQuotaError(kgv, limit)
}
tnt.Annotations[capsulev1beta1.UsedAnnotationForResource(kgv)] = fmt.Sprintf("%d", used+1)
return clt.Update(ctx, tnt)
})
if err != nil {
if _, ok := err.(*customResourceQuotaError); ok {
recorder.Eventf(tnt, corev1.EventTypeWarning, "ResourceQuota", "Resource %s/%s in API group %s cannot be created, limit usage of %d has been reached", req.Namespace, req.Name, kgv, limit)
}
return utils.ErroredResponse(err)
}
return nil
}
}
func (r *resourceCounterHandler) OnDelete(clt client.Client, decoder *admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
var tntName string
var err error
if tntName, err = r.getTenantName(ctx, clt, req); err != nil {
return utils.ErroredResponse(err)
}
if len(tntName) == 0 {
return nil
}
kgv := fmt.Sprintf("%s.%s_%s", req.Resource.Resource, req.Resource.Group, req.Resource.Version)
err = retry.RetryOnConflict(retry.DefaultRetry, func() (retryErr error) {
tnt := &capsulev1beta1.Tenant{}
if retryErr = clt.Get(ctx, types.NamespacedName{Name: tntName}, tnt); err != nil {
return
}
if tnt.Annotations == nil {
return
}
if _, ok := tnt.Annotations[capsulev1beta1.UsedAnnotationForResource(kgv)]; !ok {
return
}
used, _ := capsulev1beta1.GetUsedResourceFromTenant(*tnt, kgv)
tnt.Annotations[capsulev1beta1.UsedAnnotationForResource(kgv)] = fmt.Sprintf("%d", used-1)
return clt.Update(ctx, tnt)
})
if err != nil {
return utils.ErroredResponse(err)
}
return nil
}
}
func (r *resourceCounterHandler) OnUpdate(client client.Client, decoder *admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
return nil
}
}

View File

@@ -0,0 +1,22 @@
// Copyright 2020-2021 Clastix Labs
// SPDX-License-Identifier: Apache-2.0
package tenant
import "fmt"
type customResourceQuotaError struct {
kindGroup string
limit int64
}
func NewCustomResourceQuotaError(kindGroup string, limit int64) error {
return &customResourceQuotaError{
kindGroup: kindGroup,
limit: limit,
}
}
func (r customResourceQuotaError) Error() string {
return fmt.Sprintf("resource %s has reached quota limit of %d items", r.kindGroup, r.limit)
}