Mirror of https://github.com/projectcapsule/capsule.git (synced 2026-02-19 20:39:51 +00:00)

Compare commits: helm-v0.3. ... v0.3.0 (23 commits)
Commits:

- 03f8963309
- 0eff100c21
- ff44aa17d1
- d791fdb996
- e0f47bc3ec
- 9f184d70e7
- 7ac0d43b8d
- 47dd56fbaf
- 66f5f90104
- 010ed41ca7
- 92b1debe6b
- e64b3f8cf9
- ac4f0ab6dd
- 89348c9499
- da78423f42
- 3991359bfe
- f0fdab015b
- 610a03d0b9
- 018784564a
- 8e7078ad4f
- 4e5c00fa65
- d63a9a0ca6
- 7d1772031c
.github/workflows/ci.yml (4 changed lines)

@@ -24,9 +24,9 @@ jobs:
       - name: Run golangci-lint
         uses: golangci/golangci-lint-action@v2.3.0
         with:
-          version: v1.45.2
+          version: v1.51.2
           only-new-issues: false
-          args: --timeout 2m --config .golangci.yml
+          args: --timeout 5m --config .golangci.yml
   diff:
     name: diff
     runs-on: ubuntu-18.04
.github/workflows/e2e.yml (2 changed lines)

@@ -32,7 +32,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        k8s-version: ['v1.16.15', 'v1.17.11', 'v1.18.8', 'v1.19.4', 'v1.20.7', 'v1.21.2', 'v1.22.4', 'v1.23.6', 'v1.24.7', 'v1.25.3', 'v1.26.0']
+        k8s-version: ['v1.20.7', 'v1.21.2', 'v1.22.4', 'v1.23.6', 'v1.24.7', 'v1.25.3', 'v1.26.1']
     runs-on: ubuntu-18.04
     steps:
       - uses: actions/checkout@v2
@@ -39,9 +39,16 @@ linters:
     - testpackage
     - varnamelen
     - wrapcheck
+    - exhaustruct
+    - varcheck
+    - structcheck
+    - nosnakecase
+    - deadcode
+    - ifshort
+    - nonamedreturns

 service:
-  golangci-lint-version: 1.45.2
+  golangci-lint-version: 1.51.2

 run:
   skip-files:
Makefile (5 changed lines)

@@ -240,7 +240,7 @@ goimports:

 GOLANGCI_LINT = $(shell pwd)/bin/golangci-lint
 golangci-lint: ## Download golangci-lint locally if necessary.
-	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@v1.45.2)
+	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2)

 # Linting code as PR is expecting
 .PHONY: golint

@@ -250,7 +250,7 @@ golint: golangci-lint
 # Running e2e tests in a KinD instance
 .PHONY: e2e
 e2e/%: ginkgo
-	$(MAKE) e2e-build/$* && $(MAKE) e2e-exec || $(MAKE) e2e-destroy
+	$(MAKE) e2e-build/$* && $(MAKE) e2e-exec && $(MAKE) e2e-destroy

 e2e-build/%:
 	kind create cluster --wait=60s --name capsule --image=kindest/node:$*

@@ -266,6 +266,7 @@ e2e-build/%:
 		--set "manager.image.tag=$(VERSION)" \
 		--set 'manager.livenessProbe.failureThreshold=10' \
 		--set 'manager.readinessProbe.failureThreshold=10' \
+		--set 'podSecurityContext.seccompProfile=null' \
 		capsule \
 		./charts/capsule
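Note that the `e2e/%` pattern target above wires the requested Kubernetes version into the KinD node image: an invocation such as `make e2e/v1.25.3` (an illustrative tag taken from the CI matrix) should create a `kindest/node:v1.25.3` cluster, run the suite, and, with the change from `||` to `&&`, tear the cluster down only after a successful run.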
@@ -1,5 +1,6 @@

 <p align="left">
   <img src="https://github.com/clastix/capsule/actions/workflows/ci.yml/badge.svg"/>
   <img src="https://img.shields.io/github/license/clastix/capsule"/>
   <img src="https://img.shields.io/github/go-mod/go-version/clastix/capsule"/>
   <a href="https://github.com/clastix/capsule/releases">
@@ -133,7 +133,7 @@ func (in *Tenant) convertV1Alpha1OwnerToV1Beta1() capsulev1beta1.OwnerListSpec {
 	return owners
 }

-// nolint:gocognit,gocyclo,cyclop,maintidx
+//nolint:gocognit,gocyclo,cyclop,maintidx
 func (in *Tenant) ConvertTo(dstRaw conversion.Hub) error {
 	dst, ok := dstRaw.(*capsulev1beta1.Tenant)
 	if !ok {

@@ -365,7 +365,7 @@ func (in *Tenant) ConvertTo(dstRaw conversion.Hub) error {
 	return nil
 }

-// nolint:gocognit,gocyclo,cyclop
+//nolint:gocognit,gocyclo,cyclop
 func (in *Tenant) convertV1Beta1OwnerToV1Alpha1(src *capsulev1beta1.Tenant) {
 	ownersAnnotations := map[string][]string{
 		ownerGroupsAnnotation: nil,

@@ -19,7 +19,7 @@ import (
 	"github.com/clastix/capsule/pkg/api"
 )

-// nolint:maintidx
+//nolint:maintidx
 func generateTenantsSpecs() (Tenant, capsulev1beta1.Tenant) {
 	var namespaceQuota int32 = 5
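The only change in these three hunks is dropping the space after `//`. As far as we understand golangci-lint's conventions, `//nolint:...` (no space) is parsed as a machine-readable directive, while `// nolint:...` is treated as an ordinary comment; a minimal illustrative sketch (not code from this repository):

```go
package example

// The space-free form is recognised as a directive, so the listed linters
// are suppressed for the declaration that follows.
//
//nolint:gocognit,gocyclo
func deeplyNestedLogic(n int) int {
	if n > 0 {
		return n * 2
	}

	return -n
}

// With a space after the slashes this is just a comment, so no suppression
// is applied to the function below.
//
// nolint:gocognit
func anotherFunction() {}
```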
@@ -75,9 +75,7 @@ func init() {
 func (in *Tenant) GetNamespaces() (res []string) {
 	res = make([]string, 0, len(in.Status.Namespaces))

-	for _, ns := range in.Status.Namespaces {
-		res = append(res, ns)
-	}
+	res = append(res, in.Status.Namespaces...)

 	return
 }

@@ -34,7 +34,7 @@ func (p ProxyOperation) String() string {
 	return string(p)
 }

-// +kubebuilder:validation:Enum=Nodes;StorageClasses;IngressClasses;PriorityClasses
+// +kubebuilder:validation:Enum=Nodes;StorageClasses;IngressClasses;PriorityClasses;RuntimeClasses;PersistentVolumes
 type ProxyServiceKind string

 func (p ProxyServiceKind) String() string {

@@ -42,10 +42,12 @@ func (p ProxyServiceKind) String() string {
 }

 const (
-	NodesProxy           ProxyServiceKind = "Nodes"
-	StorageClassesProxy  ProxyServiceKind = "StorageClasses"
-	IngressClassesProxy  ProxyServiceKind = "IngressClasses"
-	PriorityClassesProxy ProxyServiceKind = "PriorityClasses"
+	NodesProxy             ProxyServiceKind = "Nodes"
+	StorageClassesProxy    ProxyServiceKind = "StorageClasses"
+	IngressClassesProxy    ProxyServiceKind = "IngressClasses"
+	PriorityClassesProxy   ProxyServiceKind = "PriorityClasses"
+	RuntimeClassesProxy    ProxyServiceKind = "RuntimeClasses"
+	PersistentVolumesProxy ProxyServiceKind = "PersistentVolumes"

 	ListOperation   ProxyOperation = "List"
 	UpdateOperation ProxyOperation = "Update"

@@ -76,9 +76,7 @@ type Tenant struct {
 func (in *Tenant) GetNamespaces() (res []string) {
 	res = make([]string, 0, len(in.Status.Namespaces))

-	for _, ns := range in.Status.Namespaces {
-		res = append(res, ns)
-	}
+	res = append(res, in.Status.Namespaces...)

 	return
 }
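Both `GetNamespaces` hunks replace an element-by-element copy with a single variadic `append`; the two forms build the same slice, as this small standalone sketch (illustrative names only) shows:

```go
package main

import "fmt"

func main() {
	namespaces := []string{"solar-development", "solar-production"}

	// Old form: copy the elements one by one.
	viaLoop := make([]string, 0, len(namespaces))
	for _, ns := range namespaces {
		viaLoop = append(viaLoop, ns)
	}

	// New form: a single variadic append.
	viaVariadic := make([]string, 0, len(namespaces))
	viaVariadic = append(viaVariadic, namespaces...)

	fmt.Println(viaLoop)     // [solar-development solar-production]
	fmt.Println(viaVariadic) // [solar-development solar-production]
}
```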
@@ -21,8 +21,8 @@ sources:

 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
-version: 0.3.4
+version: 0.4.0

 # This is the version number of the application being deployed.
 # This version number should be incremented each time you make changes to the application.
-appVersion: 0.2.1
+appVersion: 0.3.0
@@ -66,6 +66,7 @@ Here the values you can override:
|
||||
| certManager.generateCertificates | bool | `false` | Specifies whether capsule webhooks certificates should be generated using cert-manager |
|
||||
| customAnnotations | object | `{}` | Additional annotations which will be added to all resources created by Capsule helm chart |
|
||||
| customLabels | object | `{}` | Additional labels which will be added to all resources created by Capsule helm chart |
|
||||
| imagePullSecrets | list | `[]` | Configuration for `imagePullSecrets` so that you can use a private images registry. |
|
||||
| jobs.image.pullPolicy | string | `"IfNotPresent"` | Set the image pull policy of the helm chart job |
|
||||
| jobs.image.repository | string | `"clastix/kubectl"` | Set the image repository of the helm chart job |
|
||||
| jobs.image.tag | string | `""` | Set the image tag of the helm chart job |
|
||||
@@ -94,7 +95,6 @@ Here the values you can override:
|
||||
| manager.image.pullPolicy | string | `"IfNotPresent"` | Set the image pull policy. |
|
||||
| manager.image.repository | string | `"clastix/capsule"` | Set the image repository of the capsule. |
|
||||
| manager.image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. |
|
||||
| manager.imagePullSecrets | list | `[]` | Configuration for `imagePullSecrets` so that you can use a private images registry. |
|
||||
| manager.kind | string | `"Deployment"` | Set the controller deployment mode as `Deployment` or `DaemonSet`. |
|
||||
| manager.livenessProbe | object | `{"httpGet":{"path":"/healthz","port":10080}}` | Configure the liveness probe using Deployment probe spec |
|
||||
| manager.options.capsuleUserGroups | list | `["capsule.clastix.io"]` | Override the Capsule user groups |
|
||||
@@ -107,6 +107,7 @@ Here the values you can override:
|
||||
| manager.resources.limits.memory | string | `"128Mi"` | |
|
||||
| manager.resources.requests.cpu | string | `"200m"` | |
|
||||
| manager.resources.requests.memory | string | `"128Mi"` | |
|
||||
| manager.webhookPort | int | `9443` | Set an alternative to the default container port. Useful for use in some kubernetes clusters (such as GKE Private) with aggregator routing turned on, because pod ports have to be opened manually on the firewall side |
|
||||
|
||||
### ServiceMonitor Parameters
|
||||
|
||||
|
||||
(File diff suppressed because it is too large.)
@@ -60,6 +60,7 @@ spec:
|
||||
command:
|
||||
- /manager
|
||||
args:
|
||||
- --webhook-port={{ .Values.manager.webhookPort }}
|
||||
- --enable-leader-election
|
||||
- --zap-log-level={{ default 4 .Values.manager.options.logLevel }}
|
||||
- --configuration-name=default
|
||||
@@ -72,7 +73,7 @@ spec:
|
||||
fieldPath: metadata.namespace
|
||||
ports:
|
||||
- name: webhook-server
|
||||
containerPort: 9443
|
||||
containerPort: {{ .Values.manager.webhookPort }}
|
||||
protocol: TCP
|
||||
- name: metrics
|
||||
containerPort: 8080
|
||||
|
||||
@@ -59,6 +59,7 @@ spec:
|
||||
command:
|
||||
- /manager
|
||||
args:
|
||||
- --webhook-port={{ .Values.manager.webhookPort }}
|
||||
- --enable-leader-election
|
||||
- --zap-log-level={{ default 4 .Values.manager.options.logLevel }}
|
||||
- --configuration-name=default
|
||||
@@ -71,7 +72,7 @@ spec:
|
||||
fieldPath: metadata.namespace
|
||||
ports:
|
||||
- name: webhook-server
|
||||
containerPort: 9443
|
||||
containerPort: {{ .Values.manager.webhookPort }}
|
||||
protocol: TCP
|
||||
- name: metrics
|
||||
containerPort: 8080
|
||||
|
||||
@@ -13,7 +13,7 @@ spec:
|
||||
- port: 443
|
||||
name: https
|
||||
protocol: TCP
|
||||
targetPort: 9443
|
||||
targetPort: {{ .Values.manager.webhookPort }}
|
||||
selector:
|
||||
{{- include "capsule.selectorLabels" . | nindent 4 }}
|
||||
sessionAffinity: None
|
||||
|
||||
@@ -25,9 +25,6 @@ manager:
   # -- Overrides the image tag whose default is the chart appVersion.
   tag: ''

-  # -- Configuration for `imagePullSecrets` so that you can use a private images registry.
-  imagePullSecrets: []
-
   # -- Specifies if the container should be started in hostNetwork mode.
   #
   # Required for use in some managed kubernetes clusters (such as AWS EKS) with custom

@@ -35,6 +32,13 @@ manager:
   # with pods' IP CIDR and admission webhooks are not working
   hostNetwork: false

+  # -- Set an alternative to the default container port.
+  #
+  # Useful for use in some kubernetes clusters (such as GKE Private) with
+  # aggregator routing turned on, because pod ports have to be opened manually
+  # on the firewall side
+  webhookPort: 9443
+
   # Additional Capsule Controller Options
   options:
     # -- Set the log verbosity of the capsule with a value from 1 to 10

@@ -68,6 +72,9 @@ manager:
       cpu: 200m
       memory: 128Mi

+  # -- Configuration for `imagePullSecrets` so that you can use a private images registry.
+  imagePullSecrets: []
+
   # -- Annotations to add to the capsule pod.
   podAnnotations: {}
   # The following annotations guarantee scheduling for critical add-on pods
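Because the container port is now exposed as `manager.webhookPort` and referenced from the Deployment, DaemonSet and webhook Service templates in this comparison, it can presumably be overridden at install time with something like `--set manager.webhookPort=443`; the `443` here is purely illustrative, and the chart default remains `9443`.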
@@ -2840,6 +2840,8 @@ spec:
         - StorageClasses
         - IngressClasses
         - PriorityClasses
+        - RuntimeClasses
+        - PersistentVolumes
         type: string
       operations:
         items:

@@ -2416,6 +2416,8 @@ spec:
         - StorageClasses
         - IngressClasses
         - PriorityClasses
+        - RuntimeClasses
+        - PersistentVolumes
         type: string
       operations:
         items:

@@ -2767,7 +2769,7 @@ spec:
         valueFrom:
           fieldRef:
             fieldPath: metadata.namespace
-      image: clastix/capsule:v0.2.1
+      image: clastix/capsule:v0.3.0
       imagePullPolicy: IfNotPresent
       name: manager
       ports:

@@ -7,4 +7,4 @@ kind: Kustomization
 images:
   - name: controller
     newName: clastix/capsule
-    newTag: v0.2.1
+    newTag: v0.3.0
@@ -121,7 +121,7 @@ func (r *Global) reconcileNormal(ctx context.Context, tntResource *capsulev1beta
 	}

 	if tntResource.Status.ProcessedItems == nil {
-		tntResource.Status.ProcessedItems = make([]capsulev1beta2.ObjectReferenceStatus, 0, 0)
+		tntResource.Status.ProcessedItems = make([]capsulev1beta2.ObjectReferenceStatus, 0)
 	}

 	// Retrieving the list of the Tenants up to the selector provided by the GlobalTenantResource resource.

@@ -85,7 +85,7 @@ func (r *Namespaced) reconcileNormal(ctx context.Context, tntResource *capsulev1

 	// Adding the default value for the status
 	if tntResource.Status.ProcessedItems == nil {
-		tntResource.Status.ProcessedItems = make([]capsulev1beta2.ObjectReferenceStatus, 0, 0)
+		tntResource.Status.ProcessedItems = make([]capsulev1beta2.ObjectReferenceStatus, 0)
 	}

 	// Retrieving the parent of the Tenant Resource:
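Both `make` forms above allocate an empty, non-nil slice; the explicit zero capacity in `make([]capsulev1beta2.ObjectReferenceStatus, 0, 0)` is simply redundant, which is the kind of simplification Go linters typically suggest (an observation, not something stated in the commit itself).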
@@ -8,6 +8,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/valyala/fasttemplate"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierr "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -130,81 +131,114 @@ func (r *Processor) HandleSection(ctx context.Context, tnt capsulev1beta2.Tenant
|
||||
|
||||
syncErr := new(multierror.Error)
|
||||
|
||||
for nsIndex, item := range spec.NamespacedItems {
|
||||
keysAndValues := []interface{}{"index", nsIndex, "namespace", item.Namespace}
|
||||
// A TenantResource is created by a TenantOwner, and potentially, they could point to a resource in a non-owned
|
||||
// Namespace: this must be blocked by checking if this is the case.
|
||||
if !allowCrossNamespaceSelection && !tntNamespaces.Has(item.Namespace) {
|
||||
log.Info("skipping processing of namespacedItem, referring a Namespace that is not part of the given Global", keysAndValues...)
|
||||
|
||||
continue
|
||||
}
|
||||
// Namespaced Items are relying on selecting resources, rather than specifying a specific name:
|
||||
// creating it to get used by the client List action.
|
||||
itemSelector, selectorErr := metav1.LabelSelectorAsSelector(&item.Selector)
|
||||
if err != nil {
|
||||
log.Error(selectorErr, "cannot create Selector for namespacedItem", keysAndValues...)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
objs := unstructured.UnstructuredList{}
|
||||
objs.SetGroupVersionKind(schema.FromAPIVersionAndKind(item.APIVersion, fmt.Sprintf("%sList", item.Kind)))
|
||||
|
||||
if clientErr := r.client.List(ctx, &objs, client.InNamespace(item.Namespace), client.MatchingLabelsSelector{Selector: itemSelector}); clientErr != nil {
|
||||
log.Error(clientErr, "cannot retrieve object for namespacedItem", keysAndValues...)
|
||||
|
||||
syncErr = multierror.Append(syncErr, clientErr)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
multiErr := new(multierror.Group)
|
||||
// Iterating over all the retrieved objects from the resource spec to get replicated in all the selected Namespaces:
|
||||
// in case of error during the create or update function, this will be appended to the list of errors.
|
||||
for _, o := range objs.Items {
|
||||
obj := o
|
||||
|
||||
multiErr.Go(func() error {
|
||||
nsItems, nsErr := r.createOrUpdate(ctx, &obj, objLabels, objAnnotations, namespaces)
|
||||
if nsErr != nil {
|
||||
log.Error(err, "unable to sync namespacedItems", keysAndValues...)
|
||||
|
||||
return nsErr
|
||||
}
|
||||
|
||||
processed.Insert(nsItems...)
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if objsErr := multiErr.Wait(); objsErr != nil {
|
||||
syncErr = multierror.Append(syncErr, objsErr)
|
||||
}
|
||||
}
|
||||
|
||||
codecFactory := serializer.NewCodecFactory(r.client.Scheme())
|
||||
|
||||
for rawIndex, item := range spec.RawItems {
|
||||
obj, keysAndValues := unstructured.Unstructured{}, []interface{}{"index", rawIndex}
|
||||
for _, ns := range namespaces.Items {
|
||||
for nsIndex, item := range spec.NamespacedItems {
|
||||
keysAndValues := []any{"index", nsIndex, "namespace", item.Namespace}
|
||||
// A TenantResource is created by a TenantOwner, and potentially, they could point to a resource in a non-owned
|
||||
// Namespace: this must be blocked by checking if this is the case.
|
||||
if !allowCrossNamespaceSelection && !tntNamespaces.Has(item.Namespace) {
|
||||
log.Info("skipping processing of namespacedItem, referring a Namespace that is not part of the given Tenant", keysAndValues...)
|
||||
|
||||
if _, _, decodeErr := codecFactory.UniversalDeserializer().Decode(item.Raw, nil, &obj); decodeErr != nil {
|
||||
log.Error(decodeErr, "unable to deserialize rawItem", keysAndValues...)
|
||||
continue
|
||||
}
|
||||
// Namespaced Items are relying on selecting resources, rather than specifying a specific name:
|
||||
// creating it to get used by the client List action.
|
||||
itemSelector, selectorErr := metav1.LabelSelectorAsSelector(&item.Selector)
|
||||
if selectorErr != nil {
|
||||
log.Error(selectorErr, "cannot create Selector for namespacedItem", keysAndValues...)
|
||||
|
||||
syncErr = multierror.Append(syncErr, decodeErr)
|
||||
syncErr = multierror.Append(syncErr, selectorErr)
|
||||
|
||||
continue
|
||||
continue
|
||||
}
|
||||
|
||||
objs := unstructured.UnstructuredList{}
|
||||
objs.SetGroupVersionKind(schema.FromAPIVersionAndKind(item.APIVersion, fmt.Sprintf("%sList", item.Kind)))
|
||||
|
||||
if clientErr := r.client.List(ctx, &objs, client.InNamespace(item.Namespace), client.MatchingLabelsSelector{Selector: itemSelector}); clientErr != nil {
|
||||
log.Error(clientErr, "cannot retrieve object for namespacedItem", keysAndValues...)
|
||||
|
||||
syncErr = multierror.Append(syncErr, clientErr)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
multiErr := new(multierror.Group)
|
||||
// Iterating over all the retrieved objects from the resource spec to get replicated in all the selected Namespaces:
|
||||
// in case of error during the create or update function, this will be appended to the list of errors.
|
||||
for _, o := range objs.Items {
|
||||
obj := o
|
||||
obj.SetNamespace(ns.Name)
|
||||
|
||||
multiErr.Go(func() error {
|
||||
kv := keysAndValues
|
||||
kv = append(kv, "resource", fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetNamespace()))
|
||||
|
||||
if opErr := r.createOrUpdate(ctx, &obj, objLabels, objAnnotations); opErr != nil {
|
||||
log.Error(opErr, "unable to sync namespacedItems", kv...)
|
||||
|
||||
return opErr
|
||||
}
|
||||
|
||||
log.Info("resource has been replicated", kv...)
|
||||
|
||||
replicatedItem := &capsulev1beta2.ObjectReferenceStatus{}
|
||||
replicatedItem.Name = obj.GetName()
|
||||
replicatedItem.Kind = obj.GetKind()
|
||||
replicatedItem.Namespace = ns.Name
|
||||
replicatedItem.APIVersion = obj.GetAPIVersion()
|
||||
|
||||
processed.Insert(replicatedItem.String())
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if objsErr := multiErr.Wait(); objsErr != nil {
|
||||
syncErr = multierror.Append(syncErr, objsErr)
|
||||
}
|
||||
}
|
||||
|
||||
syncedRaw, rawErr := r.createOrUpdate(ctx, &obj, objLabels, objAnnotations, namespaces)
|
||||
if rawErr != nil {
|
||||
log.Info("unable to sync rawItem", keysAndValues...)
|
||||
// In case of an error processing an item in any of the selected Namespaces, store it so it can be reported later
|
||||
// to the upper call to ensure a partial sync that will be fixed by a subsequent reconciliation.
|
||||
syncErr = multierror.Append(syncErr, rawErr)
|
||||
} else {
|
||||
processed.Insert(syncedRaw...)
|
||||
for rawIndex, item := range spec.RawItems {
|
||||
template := string(item.Raw)
|
||||
|
||||
t := fasttemplate.New(template, "{{ ", " }}")
|
||||
|
||||
tmplString := t.ExecuteString(map[string]interface{}{
|
||||
"tenant.name": tnt.Name,
|
||||
"namespace": ns.Name,
|
||||
})
|
||||
|
||||
obj, keysAndValues := unstructured.Unstructured{}, []interface{}{"index", rawIndex}
|
||||
|
||||
if _, _, decodeErr := codecFactory.UniversalDeserializer().Decode([]byte(tmplString), nil, &obj); decodeErr != nil {
|
||||
log.Error(decodeErr, "unable to deserialize rawItem", keysAndValues...)
|
||||
|
||||
syncErr = multierror.Append(syncErr, decodeErr)
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
obj.SetNamespace(ns.Name)
|
||||
|
||||
if rawErr := r.createOrUpdate(ctx, &obj, objLabels, objAnnotations); rawErr != nil {
|
||||
log.Info("unable to sync rawItem", keysAndValues...)
|
||||
// In case of an error processing an item in any of the selected Namespaces, store it so it can be reported later
|
||||
// to the upper call to ensure a partial sync that will be fixed by a subsequent reconciliation.
|
||||
syncErr = multierror.Append(syncErr, rawErr)
|
||||
} else {
|
||||
log.Info("resource has been replicated", keysAndValues...)
|
||||
|
||||
replicatedItem := &capsulev1beta2.ObjectReferenceStatus{}
|
||||
replicatedItem.Name = obj.GetName()
|
||||
replicatedItem.Kind = obj.GetKind()
|
||||
replicatedItem.Namespace = ns.Name
|
||||
replicatedItem.APIVersion = obj.GetAPIVersion()
|
||||
|
||||
processed.Insert(replicatedItem.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
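The new `RawItems` loop above renders each raw manifest with `fasttemplate` before decoding it, substituting the `{{ tenant.name }}` and `{{ namespace }}` placeholders. A self-contained sketch of that templating step (the manifest and values are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/valyala/fasttemplate"
)

func main() {
	// A raw manifest carrying the two placeholders the processor understands.
	raw := `apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ tenant.name }}-settings
  namespace: {{ namespace }}`

	// Same start and end tags as in the diff: "{{ " and " }}".
	t := fasttemplate.New(raw, "{{ ", " }}")

	rendered := t.ExecuteString(map[string]interface{}{
		"tenant.name": "solar",
		"namespace":   "solar-production",
	})

	fmt.Println(rendered)
}
```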
|
||||
@@ -214,64 +248,26 @@ func (r *Processor) HandleSection(ctx context.Context, tnt capsulev1beta2.Tenant
|
||||
// createOrUpdate replicates the provided unstructured object to all the provided Namespaces:
|
||||
// this function mimics the CreateOrUpdate, by retrieving the object to understand if it must be created or updated,
|
||||
// along with adding the additional metadata, if required.
|
||||
func (r *Processor) createOrUpdate(ctx context.Context, obj *unstructured.Unstructured, labels map[string]string, annotations map[string]string, namespaces corev1.NamespaceList) ([]string, error) {
|
||||
log := ctrllog.FromContext(ctx)
|
||||
func (r *Processor) createOrUpdate(ctx context.Context, obj *unstructured.Unstructured, labels map[string]string, annotations map[string]string) (err error) {
|
||||
actual, desired := &unstructured.Unstructured{}, obj.DeepCopy()
|
||||
|
||||
errGroup := new(multierror.Group)
|
||||
actual.SetAPIVersion(desired.GetAPIVersion())
|
||||
actual.SetKind(desired.GetKind())
|
||||
actual.SetNamespace(desired.GetNamespace())
|
||||
actual.SetName(desired.GetName())
|
||||
|
||||
var items []string
|
||||
_, err = controllerutil.CreateOrUpdate(ctx, r.client, actual, func() error {
|
||||
UID := actual.GetUID()
|
||||
rv := actual.GetResourceVersion()
|
||||
|
||||
for _, item := range namespaces.Items {
|
||||
ns := item.GetName()
|
||||
actual.SetUnstructuredContent(desired.Object)
|
||||
actual.SetLabels(labels)
|
||||
actual.SetAnnotations(annotations)
|
||||
actual.SetResourceVersion(rv)
|
||||
actual.SetUID(UID)
|
||||
|
||||
errGroup.Go(func() (err error) {
|
||||
actual, desired := obj.DeepCopy(), obj.DeepCopy()
|
||||
// Using a deferred function to properly log the results, and adding the item to the processed set.
|
||||
defer func() {
|
||||
keysAndValues := []interface{}{"resource", fmt.Sprintf("%s/%s", ns, desired.GetName())}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Error(err, "unable to replicate resource", keysAndValues...)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
log.Info("resource has been replicated", keysAndValues...)
|
||||
|
||||
replicatedItem := &capsulev1beta2.ObjectReferenceStatus{
|
||||
Name: obj.GetName(),
|
||||
}
|
||||
replicatedItem.Kind = obj.GetKind()
|
||||
replicatedItem.Namespace = ns
|
||||
replicatedItem.APIVersion = obj.GetAPIVersion()
|
||||
|
||||
items = append(items, replicatedItem.String())
|
||||
}()
|
||||
|
||||
actual.SetNamespace(ns)
|
||||
|
||||
_, err = controllerutil.CreateOrUpdate(ctx, r.client, actual, func() error {
|
||||
UID := actual.GetUID()
|
||||
rv := actual.GetResourceVersion()
|
||||
|
||||
actual.SetUnstructuredContent(desired.Object)
|
||||
actual.SetNamespace(ns)
|
||||
actual.SetLabels(labels)
|
||||
actual.SetAnnotations(annotations)
|
||||
actual.SetResourceVersion(rv)
|
||||
actual.SetUID(UID)
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return
|
||||
})
|
||||
}
|
||||
// Wait returns *multierror.Error that implements stdlib error:
|
||||
// the nil check must be performed down here rather than at the caller level to avoid wrong casting.
|
||||
if err := errGroup.Wait(); err != nil {
|
||||
return items, err
|
||||
}
|
||||
|
||||
return items, nil
|
||||
return err
|
||||
}
|
||||
|
||||
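The refactored `createOrUpdate` now handles a single object in a single Namespace (the caller loops over Namespaces) and relies on controller-runtime's `CreateOrUpdate` helper, preserving the fields owned by the API server. A condensed, hypothetical sketch of that pattern (simplified identifiers, not the repository's exact function):

```go
package example

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// replicate creates or updates a copy of desired in the given Namespace,
// overlaying the replication labels and annotations while keeping the
// server-owned fields (UID and resourceVersion) intact.
func replicate(ctx context.Context, c client.Client, desired *unstructured.Unstructured, ns string, labels, annotations map[string]string) error {
	actual := &unstructured.Unstructured{}
	actual.SetAPIVersion(desired.GetAPIVersion())
	actual.SetKind(desired.GetKind())
	actual.SetNamespace(ns)
	actual.SetName(desired.GetName())

	_, err := controllerutil.CreateOrUpdate(ctx, c, actual, func() error {
		uid, rv := actual.GetUID(), actual.GetResourceVersion()

		// Overlay the desired content, then restore what the API server owns.
		actual.SetUnstructuredContent(desired.DeepCopy().Object)
		actual.SetNamespace(ns)
		actual.SetLabels(labels)
		actual.SetAnnotations(annotations)
		actual.SetResourceVersion(rv)
		actual.SetUID(uid)

		return nil
	})

	return err
}
```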
@@ -17,9 +17,8 @@ import (
|
||||
"github.com/clastix/capsule/pkg/utils"
|
||||
)
|
||||
|
||||
// nolint:dupl
|
||||
// Ensuring all the LimitRange are applied to each Namespace handled by the Tenant.
|
||||
func (r *Manager) syncLimitRanges(ctx context.Context, tenant *capsulev1beta2.Tenant) error {
|
||||
func (r *Manager) syncLimitRanges(ctx context.Context, tenant *capsulev1beta2.Tenant) error { //nolint:dupl
|
||||
// getting requested LimitRange keys
|
||||
keys := make([]string, 0, len(tenant.Spec.LimitRanges.Items))
|
||||
|
||||
|
||||
@@ -42,7 +42,7 @@ func (r *Manager) syncNamespaces(ctx context.Context, tenant *capsulev1beta2.Ten
|
||||
return
|
||||
}
|
||||
|
||||
// nolint:gocognit
|
||||
//nolint:gocognit
|
||||
func (r *Manager) syncNamespaceMetadata(ctx context.Context, namespace string, tnt *capsulev1beta2.Tenant) (err error) {
|
||||
var res controllerutil.OperationResult
|
||||
|
||||
|
||||
@@ -17,9 +17,8 @@ import (
|
||||
"github.com/clastix/capsule/pkg/utils"
|
||||
)
|
||||
|
||||
// nolint:dupl
|
||||
// Ensuring all the NetworkPolicies are applied to each Namespace handled by the Tenant.
|
||||
func (r *Manager) syncNetworkPolicies(ctx context.Context, tenant *capsulev1beta2.Tenant) error {
|
||||
func (r *Manager) syncNetworkPolicies(ctx context.Context, tenant *capsulev1beta2.Tenant) error { //nolint:dupl
|
||||
// getting requested NetworkPolicy keys
|
||||
keys := make([]string, 0, len(tenant.Spec.NetworkPolicies.Items))
|
||||
|
||||
|
||||
@@ -36,8 +36,7 @@ import (
|
||||
// the mutateFn along with the CreateOrUpdate to don't perform the update since resources are identical.
|
||||
//
|
||||
// In case of Namespace-scoped Resource Budget, we're just replicating the resources across all registered Namespaces.
|
||||
// nolint:gocognit
|
||||
func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta2.Tenant) (err error) {
|
||||
func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta2.Tenant) (err error) { //nolint:gocognit
|
||||
// getting ResourceQuota labels for the mutateFn
|
||||
var tenantLabel, typeLabel string
|
||||
|
||||
@@ -48,7 +47,7 @@ func (r *Manager) syncResourceQuotas(ctx context.Context, tenant *capsulev1beta2
|
||||
if typeLabel, err = utils.GetTypeLabel(&corev1.ResourceQuota{}); err != nil {
|
||||
return err
|
||||
}
|
||||
// nolint:nestif
|
||||
//nolint:nestif
|
||||
if tenant.Spec.ResourceQuota.Scope == api.ResourceQuotaScopeTenant {
|
||||
group := new(errgroup.Group)
|
||||
|
||||
|
||||
@@ -25,7 +25,7 @@ func (r *Manager) syncCustomResourceQuotaUsages(ctx context.Context, tenant *cap
|
||||
group string
|
||||
version string
|
||||
}
|
||||
// nolint:prealloc
|
||||
//nolint:prealloc
|
||||
var resourceList []resource
|
||||
|
||||
for k := range tenant.GetAnnotations() {
|
||||
|
||||
@@ -3081,7 +3081,7 @@ TenantSpec defines the desired state of Tenant.
|
||||
<td>
|
||||
<br/>
|
||||
<br/>
|
||||
<i>Enum</i>: Nodes, StorageClasses, IngressClasses, PriorityClasses<br/>
|
||||
<i>Enum</i>: Nodes, StorageClasses, IngressClasses, PriorityClasses, RuntimeClasses, PersistentVolumes<br/>
|
||||
</td>
|
||||
<td>true</td>
|
||||
</tr><tr>
|
||||
|
||||
@@ -10,7 +10,7 @@ Error from server (Forbidden): namespaces is forbidden:
|
||||
User "alice" cannot list resource "namespaces" in API group "" at the cluster scope
|
||||
```
|
||||
|
||||
However, the user can have permissions on some namespaces
|
||||
However, the user can have permission on some namespaces
|
||||
|
||||
```
|
||||
$ kubectl auth can-i [get|list|watch|delete] ns oil-production
|
||||
@@ -21,13 +21,13 @@ The reason, as the error message reported, is that the RBAC _list_ action is ava
|
||||
|
||||
To overcome this problem, many Kubernetes distributions introduced mirrored custom resources supported by a custom set of ACL-filtered APIs. However, this leads to radically change the user's experience of Kubernetes by introducing hard customizations that make it painful to move from one distribution to another.
|
||||
|
||||
With **Capsule**, we took a different approach. As one of the key goals, we want to keep the same user's experience on all the distributions of Kubernetes. We want people to use the standard tools they already know and love and it should just work.
|
||||
With **Capsule**, we took a different approach. As one of the key goals, we want to keep the same user experience on all the distributions of Kubernetes. We want people to use the standard tools they already know and love and it should just work.
|
||||
|
||||
## How it works
|
||||
|
||||
The `capsule-proxy` implements a simple reverse proxy that intercepts only specific requests to the APIs server and Capsule does all the magic behind the scenes.
|
||||
|
||||
Current implementation filters the following requests:
|
||||
The current implementation filters the following requests:
|
||||
|
||||
* `/api/scheduling.k8s.io/{v1}/priorityclasses{/name}`
|
||||
* `/api/v1/namespaces{/name}`
|
||||
@@ -37,13 +37,16 @@ Current implementation filters the following requests:
|
||||
* `/apis/metrics.k8s.io/{v1beta1}/nodes{/name}`
|
||||
* `/apis/networking.k8s.io/{v1,v1beta1}/ingressclasses{/name}`
|
||||
* `/apis/storage.k8s.io/v1/storageclasses{/name}`
|
||||
* `/apis/node.k8s.io/v1/runtimeclasses{/name}`
|
||||
* `/api/v1/persistentvolumes{/name}`
|
||||
|
||||
All other requests are proxied transparently to the APIs server, so no side effects are expected. We're planning to add new APIs in the future, so [PRs are welcome](https://github.com/clastix/capsule-proxy)!
|
||||
All other requests are proxy-passed transparently to the API server, so no side effects are expected.
|
||||
We're planning to add new APIs in the future, so [PRs are welcome](https://github.com/clastix/capsule-proxy)!
|
||||
|
||||
## Installation
|
||||
|
||||
Capsule Proxy is an optional add-on of the main Capsule Operator, so make sure you have a working instance of Capsule before attempting to install it.
|
||||
Use the `capsule-proxy` only if you want Tenant Owners to list their own Cluster-Scope resources.
|
||||
Use the `capsule-proxy` only if you want Tenant Owners to list their Cluster-Scope resources.
|
||||
|
||||
The `capsule-proxy` can be deployed in standalone mode, e.g. running as a pod bridging any Kubernetes client to the APIs server.
|
||||
Optionally, it can be deployed as a sidecar container in the backend of a dashboard.
|
||||
@@ -72,22 +75,22 @@ Here how it looks like when exposed through an Ingress Controller:
|
||||
## CLI flags
|
||||
|
||||
- `capsule-configuration-name`: name of the `CapsuleConfiguration` resource which contains the [Capsule configurations](/docs/general/references/#capsule-configuration) (default: `default`)
|
||||
- `capsule-user-group` (deprecated): old way to specify the user groups which request must be intercepted by the proxy
|
||||
- `ignored-user-group`: names of the groups which requests must be ignored and proxy-passed to the upstream server
|
||||
- `capsule-user-group` (deprecated): the old way to specify the user groups whose request must be intercepted by the proxy
|
||||
- `ignored-user-group`: names of the groups whose requests must be ignored and proxy-passed to the upstream server
|
||||
- `listening-port`: HTTP port the proxy listens to (default: `9001`)
|
||||
- `oidc-username-claim`: the OIDC field name used to identify the user (default: `preferred_username`), the proper value can be extracted from the Kubernetes API Server flags
|
||||
- `enable-ssl`: enable the bind on HTTPS for secure communication, allowing client-based certificate, also knows as mutual TLS (default: `true`)
|
||||
- `enable-ssl`: enable the bind on HTTPS for secure communication, allowing client-based certificate, also known as mutual TLS (default: `true`)
|
||||
- `ssl-cert-path`: path to the TLS certificate, when TLS mode is enabled (default: `/opt/capsule-proxy/tls.crt`)
|
||||
- `ssl-key-path`: path to the TLS certificate key, when TLS mode is enabled (default: `/opt/capsule-proxy/tls.key`)
|
||||
- `rolebindings-resync-period`: resync period for RoleBinding resources reflector, lower values can help if you're facing [flaky etcd connection](https://github.com/clastix/capsule-proxy/issues/174) (default: `10h`)
|
||||
|
||||
## User Authentication
|
||||
|
||||
The `capsule-proxy` intercepts all the requests from the `kubectl` client directed to the APIs Server. Users using a TLS client based authentication with certificate and key are able to talks with APIs Server since it is able to forward client certificates to the Kubernetes APIs server.
|
||||
The `capsule-proxy` intercepts all the requests from the `kubectl` client directed to the APIs Server. Users using a TLS client-based authentication with a certificate and key can talk with the API Server since it can forward client certificates to the Kubernetes APIs server.
|
||||
|
||||
It is possible to protect the `capsule-proxy` using a certificate provided by Let's Encrypt. Keep in mind that, in this way, the TLS termination will be executed by the Ingress Controller, meaning that the authentication based on client certificate will be withdrawn and not reversed to the upstream.
|
||||
It is possible to protect the `capsule-proxy` using a certificate provided by Let's Encrypt. Keep in mind that, in this way, the TLS termination will be executed by the Ingress Controller, meaning that the authentication based on the client certificate will be withdrawn and not reversed to the upstream.
|
||||
|
||||
If your prerequisite is exposing `capsule-proxy` using an Ingress, you must rely on the token-based authentication, for example OIDC or Bearer tokens. Users providing tokens are always able to reach the APIs Server.
|
||||
If your prerequisite is exposing `capsule-proxy` using an Ingress, you must rely on the token-based authentication, for example, OIDC or Bearer tokens. Users providing tokens are always able to reach the APIs Server.
|
||||
|
||||
## Kubernetes dashboards integration
|
||||
|
||||
@@ -124,6 +127,8 @@ The proxy setting `kind` is an __enum__ accepting the supported resources:
|
||||
- `StorageClasses`
|
||||
- `IngressClasses`
|
||||
- `PriorityClasses`
|
||||
- `RuntimeClasses`
|
||||
- `PersistentVolumes`
|
||||
|
||||
Each Resource kind can be granted with several verbs, such as:
|
||||
|
||||
@@ -131,6 +136,14 @@ Each Resource kind can be granted with several verbs, such as:
|
||||
- `Update`
|
||||
- `Delete`
|
||||
|
||||
## Cluster-scoped resources selection strategy precedence
|
||||
|
||||
Starting from [Capsule v0.2.0](https://github.com/clastix/capsule/releases/tag/v0.2.0), selection of cluster-scoped resources based on labels has been introduced.
|
||||
|
||||
Due to the limitations of the Kubernetes API Server, which does not support an `OR` label selector, the Capsule core team decided to give precedence to the label selector over the exact and regex match.
|
||||
|
||||
Capsule is going to deprecate the selection based on exact names and regex in upcoming releases, in order to move entirely to the label-matching approach of Kubernetes itself.
|
||||
|
||||
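In practice, when both a label selector and the exact-name or `allowedRegex` fields are set for the same resource kind, it is the label selector that is honoured, with the name- and regex-based selection applying only in its absence; this is our reading of the precedence rule above rather than wording taken verbatim from the documentation.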
### Namespaces
|
||||
|
||||
As tenant owner `alice`, you can use `kubectl` to create some namespaces:
|
||||
@@ -162,7 +175,7 @@ metadata:
|
||||
EOF
|
||||
|
||||
namespace/solar-development unchanged
|
||||
# or, in case of non existing Namespace:
|
||||
# or, in case of non-existing Namespace:
|
||||
namespace/solar-development created
|
||||
```
|
||||
|
||||
@@ -202,7 +215,7 @@ When issuing a `kubectl describe node`, some other endpoints are put in place:
|
||||
* `api/v1/pods?fieldSelector=spec.nodeName%3D{name}`
|
||||
* `/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/{name}`
|
||||
|
||||
These are mandatory in order to retrieve the list of the running Pods on the required node, and providing info about the lease status of it.
|
||||
These are mandatory to retrieve the list of the running Pods on the required node and provide info about its lease status.
|
||||
|
||||
### Storage Classes
|
||||
|
||||
@@ -239,7 +252,7 @@ glusterfs rook.io/glusterfs Delete WaitForFirstConsum
|
||||
zol zfs-on-linux/zfs Delete WaitForFirstConsumer false 54m
|
||||
```
|
||||
|
||||
The expected output using `capsule-proxy` is the retrieval of the `custom` Storage Class as well the other ones matching the regex `\w+fs`.
|
||||
The expected output using `capsule-proxy` is the retrieval of the `custom` Storage Class as well as the other ones matching the regex `\w+fs`.
|
||||
|
||||
```bash
|
||||
$ kubectl --context alice-oidc@mycluster get storageclasses
|
||||
@@ -259,7 +272,6 @@ metadata:
|
||||
name: cephfs
|
||||
name: cephfs
|
||||
provisioner: cephfs
|
||||
|
||||
```
|
||||
|
||||
### Ingress Classes
|
||||
@@ -286,7 +298,7 @@ spec:
|
||||
allowedRegex: "\\w+-lb"
|
||||
```
|
||||
|
||||
In the Kubernetes cluster we could have more Ingress Class resources, some of them forbidden and non-usable by the Tenant owner.
|
||||
In the Kubernetes cluster, we could have more Ingress Class resources, some of them forbidden and non-usable by the Tenant owner.
|
||||
|
||||
```bash
|
||||
$ kubectl --context admin@mycluster get ingressclasses
|
||||
@@ -385,23 +397,141 @@ globalDefault: false
|
||||
description: "Priority class for Tenants"
|
||||
```
|
||||
|
||||
### ProxySetting Use Case
|
||||
Consider a scenario, where a cluster admin creates a tenant and assign ownership of the tenant to a user, so called tenant owner. Afterwards, tenant owner would in turn like to provide access to their cluster-scoped resources to a set of users (e.g. non-owners or tenant users), groups and service accounts, who doesn't require tenant owner level permissions.
|
||||
### Runtime Classes
|
||||
|
||||
Tenant Owner can provide access to following cluster-scoped resources to their tenant users, groups and service account by creating `ProxySetting` resource
|
||||
Allowed RuntimeClasses assigned to a Tenant Owner can be enforced as follows:
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1beta2
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owners:
|
||||
- kind: User
|
||||
name: alice
|
||||
proxySettings:
|
||||
- kind: RuntimeClasses
|
||||
operations:
|
||||
- List
|
||||
runtimeClasses:
|
||||
matchExpressions:
|
||||
- key: capsule.clastix.io/qos
|
||||
operator: In
|
||||
values:
|
||||
- bronze
|
||||
- silver
|
||||
```
|
||||
|
||||
In the Kubernetes cluster we could have more RuntimeClasses resources, some of them forbidden and non-usable by the Tenant owner.
|
||||
|
||||
```bash
|
||||
$ kubectl --context admin@mycluster get runtimeclasses.node.k8s.io --show-labels
|
||||
NAME HANDLER AGE LABELS
|
||||
bronze bronze 21h capsule.clastix.io/qos=bronze
|
||||
default myconfiguration 21h <none>
|
||||
gold gold 21h capsule.clastix.io/qos=gold
|
||||
silver silver 21h capsule.clastix.io/qos=silver
|
||||
```
|
||||
|
||||
The expected output using `capsule-proxy` is the retrieval of the `bronze` and `silver` ones.
|
||||
|
||||
```bash
|
||||
$ kubectl --context alice-oidc@mycluster get runtimeclasses.node.k8s.io
|
||||
NAME HANDLER AGE
|
||||
bronze bronze 21h
|
||||
silver silver 21h
|
||||
```
|
||||
|
||||
> `RuntimeClass` is one of the latest implementations in Capsule Proxy and adheres to the new selection strategy based on label selectors, rather than the exact-match and regex ones.
|
||||
>
|
||||
> The latter ones are going to be deprecated in the upcoming releases of Capsule.
|
||||
|
||||
### Persistent Volumes
|
||||
|
||||
A Tenant can request persistent volumes through the `PersistentVolumeClaim` API, and get a volume from it.
|
||||
|
||||
Starting from release v0.2.0, all the `PersistentVolumes` are labelled with the Capsule label that is used by the Capsule Proxy to allow the retrieval.
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
annotations:
|
||||
finalizers:
|
||||
- kubernetes.io/pv-protection
|
||||
labels:
|
||||
capsule.clastix.io/tenant: oil
|
||||
name: data-01
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
capacity:
|
||||
storage: 10Gi
|
||||
hostPath:
|
||||
path: /mnt/data
|
||||
type: ""
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
storageClassName: manual
|
||||
volumeMode: Filesystem
|
||||
```
|
||||
|
||||
> Please, notice the label `capsule.clastix.io/tenant` matching the Tenant name.
|
||||
|
||||
With that said, a multi-tenant cluster can be made of several volumes, each one for different tenants.
|
||||
|
||||
```bash
|
||||
$ kubectl --context admin@mycluster get persistentvolumes --show-labels
|
||||
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE LABELS
|
||||
data-01 10Gi RWO Retain Available manual 17h capsule.clastix.io/tenant=oil
|
||||
data-02 10Gi RWO Retain Available manual 17h capsule.clastix.io/tenant=gas
|
||||
|
||||
```
|
||||
|
||||
For the `oil` Tenant, Alice has the required permission to list Volumes.
|
||||
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1beta2
|
||||
kind: Tenant
|
||||
metadata:
|
||||
name: oil
|
||||
spec:
|
||||
owners:
|
||||
- kind: User
|
||||
name: alice
|
||||
proxySettings:
|
||||
- kind: PersistentVolumes
|
||||
operations:
|
||||
- List
|
||||
```
|
||||
|
||||
The expected output using `capsule-proxy` is the retrieval of the PVs used currently, or in the past, by the PVCs in their Tenants.
|
||||
|
||||
```bash
|
||||
$ kubectl --context alice-oidc@mycluster get persistentvolumes
|
||||
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
|
||||
data-01 10Gi RWO Retain Available manual 17h
|
||||
```
|
||||
|
||||
### ProxySetting Use Case
|
||||
Consider a scenario where a cluster admin creates a tenant and assigns ownership of the tenant to a user, the so-called tenant owner. Afterwards, the tenant owner would in turn like to provide access to their cluster-scoped resources to a set of users (e.g. non-owners or tenant users), groups and service accounts who don't require tenant-owner-level permissions.
|
||||
|
||||
A Tenant Owner can provide access to the following cluster-scoped resources to their tenant users, groups and service accounts by creating a `ProxySetting` resource:
|
||||
- `Nodes`
|
||||
- `StorageClasses`
|
||||
- `IngressClasses`
|
||||
- `PriorityClasses`
|
||||
- `RuntimeClasses`
|
||||
- `PersistentVolumes`
|
||||
|
||||
Each Resource kind can be granted with following verbs, such as:
|
||||
Each Resource kind can be granted with the following verbs, such as:
|
||||
- `List`
|
||||
- `Update`
|
||||
- `Delete`
|
||||
|
||||
These tenant users, groups and service accounts have less privileged access than tenant owners.
|
||||
|
||||
As a Tenant Owner `alice`, you can create a `ProxySetting` resources to allow `bob` to list nodes, storage classes, ingress classes and priority classes
|
||||
As a Tenant Owner `alice`, you can create a `ProxySetting` resource to allow `bob` to list nodes, storage classes, ingress classes and priority classes
|
||||
```yaml
|
||||
apiVersion: capsule.clastix.io/v1beta2
|
||||
kind: ProxySetting
|
||||
@@ -439,7 +569,7 @@ $ kubectl auth can-i --context bob-oidc@mycluster get priorityclasses
|
||||
yes
|
||||
```
|
||||
## HTTP support
|
||||
Capsule proxy supports `https` and `http`, although the latter is not recommended, we understand that it can be useful for some use cases (i.e. development, working behind a TLS-terminated reverse proxy and so on). As the default behaviour is to work with `https`, we need to use the flag `--enable-ssl=false` if we really want to work under `http`.
|
||||
Capsule proxy supports `https` and `http`, although the latter is not recommended; we understand that it can be useful for some use cases (i.e. development, working behind a TLS-terminated reverse proxy and so on). As the default behaviour is to work with `https`, we need to use the flag `--enable-ssl=false` if we want to work under `http`.
|
||||
|
||||
After having the `capsule-proxy` working under `http`, requests must provide authentication using an allowed Bearer Token.
|
||||
|
||||
@@ -456,16 +586,16 @@ $ curl -H "Authorization: Bearer $TOKEN" http://localhost:9001/api/v1/namespaces
|
||||
|
||||
Starting from the v0.3.0 release, Capsule Proxy exposes Prometheus metrics available at `http://0.0.0.0:8080/metrics`.
|
||||
|
||||
The offered metrics are related to the internal `controller-manager` code base, such as work-queue and REST client requests, and the Go runtime ones.
|
||||
The offered metrics are related to the internal `controller-manager` code base, such as work queue and REST client requests, and the Go runtime ones.
|
||||
|
||||
Along with these, metrics `capsule_proxy_response_time_seconds` and `capsule_proxy_requests_total` have been introduced and are specific to the Capsule Proxy code-base and functionalities.
|
||||
|
||||
`capsule_proxy_response_time_seconds` offers a bucket representation of the HTTP request duration.
|
||||
The available variables for this metrics are the following ones:
|
||||
- `path`: the HTTP path of each single request that Capsule Proxy passes to the upstream
|
||||
The available variables for these metrics are the following ones:
|
||||
- `path`: the HTTP path of every single request that Capsule Proxy passes to the upstream
|
||||
|
||||
`capsule_proxy_requests_total` counts the global requests that Capsule Proxy is passing to the upstream with the following labels.
|
||||
- `path`: the HTTP path of each single request that Capsule Proxy passes to the upstream
|
||||
- `path`: the HTTP path of every single request that Capsule Proxy passes to the upstream
|
||||
- `status`: the HTTP status code of the request
|
||||
|
||||
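Since `capsule_proxy_response_time_seconds` is a histogram, a typical way to consume it is a quantile query such as `histogram_quantile(0.95, sum by (le, path) (rate(capsule_proxy_response_time_seconds_bucket[5m])))`; this PromQL expression is illustrative and assumes the usual `_bucket` series that Prometheus histograms expose.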
> Example output of the metrics:
|
||||
@@ -493,6 +623,6 @@ The available variables for this metrics are the following ones:
|
||||
|
||||
## Contributing
|
||||
|
||||
`capsule-proxy` is an open-source software released with Apache2 [license](https://github.com/clastix/capsule-proxy/blob/master/LICENSE).
|
||||
`capsule-proxy` is open-source software released with Apache2 [license](https://github.com/clastix/capsule-proxy/blob/master/LICENSE).
|
||||
|
||||
Contributing guidelines are available [here](https://github.com/clastix/capsule-proxy/blob/master/CONTRIBUTING.md).
|
||||
|
||||
@@ -3,7 +3,8 @@
|
||||
List of Tenant API changes:
|
||||
|
||||
- [Capsule v0.1.0](https://github.com/clastix/capsule/releases/tag/v0.1.0) bump to `v1beta1` from `v1alpha1`.
|
||||
- [Capsule v0.2.0](https://github.com/clastix/capsule/releases/tag/v0.1.0) bump to `v1beta2` from `v1beta1`, deprecating `v1alpha1`.
|
||||
- [Capsule v0.2.0](https://github.com/clastix/capsule/releases/tag/v0.2.0) bump to `v1beta2` from `v1beta1`, deprecating `v1alpha1`.
|
||||
- [Capsule v0.3.0](https://github.com/clastix/capsule/releases/tag/v0.3.0) missing enums required by [Capsule Proxy](https://github.com/clastix/capsule-proxy).
|
||||
|
||||
This document aims to provide support and a guide on how to perform a clean upgrade to the latest API version in order to avoid service disruption and data loss.
|
||||
|
||||
@@ -14,6 +15,55 @@ As an installation method, Helm is given for granted, YMMV using the `kustomize`
|
||||
We strongly suggest performing a full backup of your Kubernetes cluster, such as storage and etcd.
|
||||
Use your favourite tool according to your needs.
|
||||
|
||||
# Upgrading from v0.2.x to v0.3.x
|
||||
|
||||
A minor bump has been requested due to some missing enums in the Tenant resource.
|
||||
|
||||
## Scale down the Capsule controller
|
||||
|
||||
Using `kubectl` or Helm, scale down the Capsule controller manager: this is required to prevent the old Capsule version from processing objects that aren't yet installed as a CRD.
|
||||
|
||||
```
|
||||
helm upgrade -n capsule-system capsule --set "replicaCount=0"
|
||||
```
|
||||
|
||||
## Patch the Tenant custom resource definition
|
||||
|
||||
Unfortunately, Helm doesn't manage the lifecycle of Custom Resource Definitions; additional details can be found [here](https://github.com/helm/community/blob/f9e06c16d89ccea1bea77c01a6a96ae3b309f823/architecture/crds.md).
|
||||
|
||||
This process must be executed manually as follows:
|
||||
|
||||
```
|
||||
kubectl apply -f https://raw.githubusercontent.com/clastix/capsule/v0.3.0/config/crd/bases/tenant-crd.yaml
|
||||
```
|
||||
|
||||
## Update your Capsule Helm chart
|
||||
|
||||
Ensure to update the Capsule repository to fetch the latest changes.
|
||||
|
||||
```
|
||||
helm repo update
|
||||
```
|
||||
|
||||
The latest chart must be used: at the time of writing, >=0.4.0 is expected for Capsule >=v0.3.0. You can fetch the full list of available charts with the following command.
|
||||
|
||||
```
|
||||
helm search repo -l clastix/capsule
|
||||
```
|
||||
|
||||
Since the Tenant custom resource definition has been patched with new fields, we can reinstall Capsule using the provided Helm chart.
|
||||
|
||||
```
|
||||
helm upgrade --install capsule clastix/capsule -n capsule-system --create-namespace --version 0.4.0
|
||||
```
|
||||
|
||||
This will start the Operator with the latest changes, and perform the required sync operations like:
|
||||
|
||||
1. Ensuring the CA is still valid
|
||||
2. Ensuring a TLS certificate is valid for the local webhook server
|
||||
3. If not using the cert-manager integration, patching the Validating and Mutating Webhook Configuration resources with the Capsule CA
|
||||
4. If not using the cert-manager integration, patching the Capsule's Custom Resource Definitions conversion webhook fields with the Capsule CA
|
||||
|
||||
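As an optional sanity check after the upgrade, a command such as `kubectl get crd tenants.capsule.clastix.io -o jsonpath='{.spec.versions[*].name}'` should list the served API versions of the Tenant CRD, and `kubectl get tenants` should still return the existing Tenants; these verification commands are suggestions and are not part of the original guide.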
# Upgrading from v0.1.3 to v0.2.x
|
||||
|
||||
## Scale down the Capsule controller
|
||||
|
||||
@@ -104,6 +104,10 @@ var _ = Describe("Creating a TenantResource object", func() {
|
||||
Name: "raw-secret-1",
|
||||
},
|
||||
Type: corev1.SecretTypeOpaque,
|
||||
Data: map[string][]byte{
|
||||
"{{ tenant.name }}": []byte("Cg=="),
|
||||
"{{ namespace }}": []byte("Cg=="),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -118,6 +122,10 @@ var _ = Describe("Creating a TenantResource object", func() {
|
||||
Name: "raw-secret-2",
|
||||
},
|
||||
Type: corev1.SecretTypeOpaque,
|
||||
Data: map[string][]byte{
|
||||
"{{ tenant.name }}": []byte("Cg=="),
|
||||
"{{ namespace }}": []byte("Cg=="),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -132,6 +140,10 @@ var _ = Describe("Creating a TenantResource object", func() {
|
||||
Name: "raw-secret-3",
|
||||
},
|
||||
Type: corev1.SecretTypeOpaque,
|
||||
Data: map[string][]byte{
|
||||
"{{ tenant.name }}": []byte("Cg=="),
|
||||
"{{ namespace }}": []byte("Cg=="),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -220,6 +232,16 @@ var _ = Describe("Creating a TenantResource object", func() {
|
||||
return secrets.Items
|
||||
}, defaultTimeoutInterval, defaultPollInterval).Should(HaveLen(4))
|
||||
})
|
||||
|
||||
By(fmt.Sprintf("ensuring raw items are templated in %s Namespace", ns), func() {
|
||||
for _, name := range []string{"raw-secret-1", "raw-secret-2", "raw-secret-3"} {
|
||||
secret := corev1.Secret{}
|
||||
Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: ns}, &secret)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(secret.Data).To(HaveKey(solar.Name))
|
||||
Expect(secret.Data).To(HaveKey(ns))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
By("using a Namespace selector", func() {
|
||||
|
||||
10
go.mod
10
go.mod
@@ -10,6 +10,7 @@ require (
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.7.1
|
||||
github.com/valyala/fasttemplate v1.2.2
|
||||
go.uber.org/zap v1.19.1
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
k8s.io/api v0.24.2
|
||||
@@ -59,13 +60,14 @@ require (
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 // indirect
|
||||
golang.org/x/net v0.7.0 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb // indirect
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/sys v0.5.0 // indirect
|
||||
golang.org/x/term v0.5.0 // indirect
|
||||
golang.org/x/text v0.7.0 // indirect
|
||||
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
|
||||
18
go.sum
18
go.sum
@@ -482,6 +482,10 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -634,8 +638,8 @@ golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 h1:Yqz/iviulwKwAREEeUd3nbBFn0XuyJqkoft2IlrvOhc=
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -729,11 +733,12 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -742,8 +747,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

145
hack/local-test-with-kind.sh
Executable file
145
hack/local-test-with-kind.sh
Executable file
@@ -0,0 +1,145 @@
#!/usr/bin/env bash

# This script tests Capsule with Kind.
# Good to run before opening a pull request.

USER=alice
TENANT=oil
GROUP=capsule.clastix.io
KIND_CLUSTER_NAME=capsule-local-test

function error_action() {
  cleanup_action
  exit 1
}

function cleanup_action() {
  kind delete cluster --name=${KIND_CLUSTER_NAME}
  rm -f ./tenant-test.yaml
  rm -f ${USER}-${TENANT}.crt
  rm -f ${USER}-${TENANT}.key
  rm -f ${USER}-${TENANT}.kubeconfig
}

function check_command() {
  local command=$1

  if ! command -v $command &> /dev/null; then
    echo "Error: ${command} not found"
    exit 1
  fi
}

check_command kind
check_command kubectl

### Prepare Kind cluster

echo `date`": INFO: Create Kind cluster"
error_create_kind=$(kind create cluster --name=${KIND_CLUSTER_NAME} 2>&1)
if [ $? -ne 0 ]; then
  echo `date`": $error_create_kind"
  exit 1
fi

echo `date`": INFO: Wait for the Kind cluster to be ready (30 seconds at most)"
counter=0
while true
do
  if [ $counter == 30 ]; then
    echo `date`": ERROR: Kind cluster not ready for too long"
    error_action
  fi

  kubectl get nodes | grep " Ready " &>/dev/null
  if [ $? == 0 ]; then
    break
  fi

  ((counter++))
  sleep 1
done

echo `date`": INFO: Kind cluster ready"

### Install the Capsule Helm chart into Kind

echo `date`": INFO: Install the Capsule Helm chart"
error_install_helm=$(helm install capsule ./charts/capsule/ -n capsule-system --create-namespace 2>&1)
if [ $? -ne 0 ]; then
  echo `date`": $error_install_helm"
  exit 1
fi

echo `date`": INFO: Wait for the Capsule pod to be ready (30 seconds at most)"
counter=0
while true
do
  if [ $counter == 30 ]; then
    echo `date`": ERROR: Capsule pod not ready for too long"
    error_action
  fi

  kubectl get pod -n capsule-system | grep " Running " &>/dev/null
  if [ $? == 0 ]; then
    break
  fi

  ((counter++))
  sleep 1
done
sleep 5

echo `date`": INFO: Capsule ready"

### Tests

echo `date`": INFO: Create tenant"
cat >./tenant-test.yaml<<EOF
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
  name: ${TENANT}
spec:
  owners:
  - name: ${USER}
    kind: User
EOF

error_create_tenant=$(kubectl create -f ./tenant-test.yaml 2>&1)
if [ $? -ne 0 ]; then
  echo `date`": $error_create_tenant"
  error_action
fi

echo `date`": INFO: Check the tenant exists"
error_check_tenant=$(kubectl get tenant ${TENANT} 2>&1)
if [ $? -ne 0 ]; then
  echo `date`": ERROR: $error_check_tenant"
  error_action
fi

echo `date`": INFO: Create user ${USER} for tenant ${TENANT}"
error_create_user=$(./hack/create-user.sh ${USER} ${TENANT} 2>&1)
if [ $? -ne 0 ]; then
  echo `date`": ERROR: $error_create_user"
  error_action
fi

echo `date`": INFO: Create a namespace as the tenant user"
error_create_namespace=$(kubectl --kubeconfig=${USER}-${TENANT}.kubeconfig create ns ${TENANT}-test 2>&1)
if [ $? -ne 0 ]; then
  echo `date`": ERROR: $error_create_namespace"
  error_action
fi

echo `date`": INFO: Check the namespace is listed in the tenant"
error_tenant=$(kubectl get tenant ${TENANT} -o yaml | grep namespaces -A1 | grep ${TENANT}-test 2>&1)
if [ $? -ne 0 ]; then
  echo `date`": ERROR: $error_tenant"
  error_action
fi

echo `date`": INFO: All ok"

cleanup_action

7
main.go
7
main.go
@@ -96,14 +96,17 @@ func newDelegatingClient(cache cache.Cache, config *rest.Config, options client.
return delegatingClient, nil
}

// nolint:maintidx,cyclop
//nolint:maintidx,cyclop
func main() {
var enableLeaderElection, version bool

var metricsAddr, namespace, configurationName string

var webhookPort int

var goFlagSet goflag.FlagSet

flag.IntVar(&webhookPort, "webhook-port", 9443, "The port the webhook server binds to.")
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. "+
@@ -142,7 +145,7 @@ func main() {
manager, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
Port: webhookPort,
LeaderElection: enableLeaderElection,
LeaderElectionID: "42c733ea.clastix.capsule.io",
HealthProbeBindAddress: ":10080",

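Editor's note: many of the hunks that follow only change "// nolint:" comments into "//nolint:". golangci-lint recognises the suppression directive only when there is no space between the slashes and "nolint"; with a space it is treated as an ordinary comment, so the linter is not actually silenced, and checkers such as nolintlint (when enabled by the project configuration) report it as a malformed directive. A minimal illustration, with a linter name chosen only for the example:

package example

// With a space this is just a comment: nothing is suppressed, and nolintlint
// flags it as a malformed directive.
// nolint:forcetypeassert

// Without the space it is a machine-readable directive that golangci-lint honours.
//nolint:forcetypeassert
func asString(v interface{}) string {
	return v.(string) // the type assertion the directive is meant to cover
}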
@@ -144,7 +144,7 @@ func NewCertificateAuthorityFromBytes(certBytes, keyBytes []byte) (*CapsuleCA, e
}, nil
}

// nolint:nakedret
//nolint:nakedret
func (c *CapsuleCA) GenerateCertificate(opts CertificateOptions) (certificatePem *bytes.Buffer, certificateKey *bytes.Buffer, err error) {
var certPrivKey *rsa.PrivateKey
certPrivKey, err = rsa.GenerateKey(rand.Reader, 4096)

@@ -46,7 +46,7 @@ func NewCapsuleConfiguration(ctx context.Context, client client.Client, name str
func (c *capsuleConfiguration) ProtectedNamespaceRegexp() (*regexp.Regexp, error) {
expr := c.retrievalFn().Spec.ProtectedNamespaceRegexpString
if len(expr) == 0 {
return nil, nil // nolint:nilnil
return nil, nil //nolint:nilnil
}

r, err := regexp.Compile(expr)

@@ -22,7 +22,7 @@ type HostnamePath struct {
Obj metav1.Object
}

// nolint:forcetypeassert
//nolint:forcetypeassert
func (s HostnamePath) Object() client.Object {
return s.Obj.(client.Object)
}

@@ -21,7 +21,7 @@ func (o NamespacesReference) Field() string {
return ".status.namespaces"
}

// nolint:forcetypeassert
//nolint:forcetypeassert
func (o NamespacesReference) Func() client.IndexerFunc {
return func(object client.Object) []string {
return object.(api.Tenant).GetNamespaces()

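Editor's note: the IndexerFunc above feeds a controller-runtime field index keyed on ".status.namespaces", which is what lets the webhooks resolve the tenant owning a namespace with a field selector instead of scanning every tenant (the same pattern shows up in the TenantFromIngress and IsCapsuleUser hunks further down). A hedged sketch of how such an index is consumed; the v1beta2 import path is assumed for illustration:

package sketch

import (
	"context"

	"k8s.io/apimachinery/pkg/fields"
	"sigs.k8s.io/controller-runtime/pkg/client"

	capsulev1beta2 "github.com/clastix/capsule/api/v1beta2" // path assumed for the example
)

// tenantOwning lists tenants through the ".status.namespaces" field index and returns
// the one whose status contains the given namespace, or nil when no tenant matches.
func tenantOwning(ctx context.Context, c client.Client, namespace string) (*capsulev1beta2.Tenant, error) {
	tl := &capsulev1beta2.TenantList{}
	if err := c.List(ctx, tl, client.MatchingFieldsSelector{
		Selector: fields.OneTermEqualSelector(".status.namespaces", namespace),
	}); err != nil {
		return nil, err
	}

	if len(tl.Items) == 0 {
		return nil, nil //nolint:nilnil
	}

	return &tl.Items[0], nil
}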
@@ -15,9 +15,7 @@ type userGroupList []string

func NewUserGroupList(groups []string) UserGroupList {
list := make(userGroupList, len(groups))
for k, v := range groups {
list[k] = v
}
copy(list, groups)

sort.SliceStable(list, func(i, j int) bool {
return list[i] < list[j]

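Editor's note: the hunk above swaps a manual index loop for the built-in copy, which copies min(len(dst), len(src)) elements; because the destination is pre-sized with make(..., len(groups)), the behaviour is identical, just more idiomatic. A standalone sketch of the same pattern:

package main

import (
	"fmt"
	"sort"
)

// newSortedList pre-sizes the destination and uses copy instead of
// "for k, v := range groups { list[k] = v }"; both fill exactly len(groups) elements.
func newSortedList(groups []string) []string {
	list := make([]string, len(groups))
	copy(list, groups)

	sort.SliceStable(list, func(i, j int) bool { return list[i] < list[j] })

	return list
}

func main() {
	fmt.Println(newSortedList([]string{"system:masters", "capsule.clastix.io"}))
}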
@@ -88,7 +88,7 @@ func (i ingressClassNotValidError) Error() string {
return utils.DefaultAllowedValuesErrorMessage(i.spec, err)
}

// nolint:predeclared
//nolint:predeclared
func appendHostnameError(spec api.AllowedListSpec) (append string) {
if len(spec.Exact) > 0 {
append = fmt.Sprintf(", specify one of the following (%s)", strings.Join(spec.Exact, ", "))

@@ -63,7 +63,7 @@ func (n NetworkingV1) Namespace() string {
return n.GetNamespace()
}

// nolint:dupl
//nolint:dupl
func (n NetworkingV1) HostnamePathsPairs() (pairs map[string]sets.String) {
pairs = make(map[string]sets.String)

@@ -129,7 +129,7 @@ func (n NetworkingV1Beta1) Namespace() string {
return n.GetNamespace()
}

// nolint:dupl
//nolint:dupl
func (n NetworkingV1Beta1) HostnamePathsPairs() (pairs map[string]sets.String) {
pairs = make(map[string]sets.String)

@@ -193,7 +193,7 @@ func (e Extension) Namespace() string {
return e.GetNamespace()
}

// nolint:dupl
//nolint:dupl
func (e Extension) HostnamePathsPairs() (pairs map[string]sets.String) {
pairs = make(map[string]sets.String)

@@ -26,13 +26,13 @@ func TenantFromIngress(ctx context.Context, c client.Client, ingress Ingress) (*
}

if len(tenantList.Items) == 0 {
return nil, nil // nolint:nilnil
return nil, nil //nolint:nilnil
}

return &tenantList.Items[0], nil
}

// nolint:nakedret
//nolint:nakedret
func FromRequest(req admission.Request, decoder *admission.Decoder) (ingress Ingress, err error) {
switch req.Kind.Group {
case "networking.k8s.io":

@@ -84,7 +84,7 @@ func (r *collision) validate(ctx context.Context, client client.Client, req admi
return &response
}

// nolint:gocognit,gocyclo,cyclop
//nolint:gocognit,gocyclo,cyclop
func (r *collision) validateCollision(ctx context.Context, clt client.Client, ing Ingress, scope api.HostnameCollisionScope) error {
for hostname, paths := range ing.HostnamePathsPairs() {
for path := range paths {
@@ -100,7 +100,7 @@ func (r *collision) validateCollision(ctx context.Context, clt client.Client, in
}

namespaces := sets.NewString()
// nolint:exhaustive
//nolint:exhaustive
switch scope {
case api.HostnameCollisionScopeCluster:
tenantList := &capsulev1beta2.TenantList{}

@@ -10,7 +10,7 @@ import (
capsuleapi "github.com/clastix/capsule/pkg/api"
)

// nolint:predeclared
//nolint:predeclared
func appendForbiddenError(spec *capsuleapi.ForbiddenListSpec) (append string) {
append += "Forbidden are "
if len(spec.Exact) > 0 {

@@ -125,7 +125,7 @@ func (r *userMetadataHandler) OnUpdate(client client.Client, decoder *admission.
}

if v != oldNs.GetAnnotations()["scheduler.alpha.kubernetes.io/node-selector"] {
response := admission.Denied("the the node-selector annotation is enforced, cannot be updated")
response := admission.Denied("the node-selector annotation is enforced, cannot be updated")

recorder.Eventf(tnt, corev1.EventTypeWarning, "ForbiddenNodeSelectorUpdate", string(response.Result.Reason))

@@ -50,7 +50,7 @@ func (r *handler) generic(ctx context.Context, req admission.Request, client cli
return tnt, nil
}

return nil, nil // nolint:nilnil
return nil, nil //nolint:nilnil
}

//nolint:dupl

@@ -10,7 +10,7 @@ import (
capsulev1beta2 "github.com/clastix/capsule/pkg/api"
)

// nolint:predeclared
//nolint:predeclared
func appendForbiddenError(spec *capsulev1beta2.ForbiddenListSpec) (append string) {
append += "Forbidden are "
if len(spec.Exact) > 0 {

@@ -86,7 +86,7 @@ func (h *containerRegistryHandler) VerifyContainerRegistry(recorder record.Event
reg := NewRegistry(container.Image)

if len(reg.Registry()) == 0 {
recorder.Eventf(&tnt, corev1.EventTypeWarning, "MissingFQCI", "Pod %s/%s is not using using a fully qualified container image, cannot enforce registry the current Tenant", req.Namespace, req.Name, reg.Registry())
recorder.Eventf(&tnt, corev1.EventTypeWarning, "MissingFQCI", "Pod %s/%s is not using a fully qualified container image, cannot enforce registry the current Tenant", req.Namespace, req.Name, reg.Registry())

response := admission.Denied(NewContainerRegistryForbidden(container.Image, *tnt.Spec.ContainerRegistries).Error())

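Editor's note, for context on the MissingFQCI event above: an image reference without an explicit registry host (for example nginx:latest) is resolved against the implicit default registry, so the webhook cannot tell which registry the pod will actually pull from and therefore cannot enforce the tenant's allowed registries. The sketch below is not Capsule's actual Registry type, just the usual standalone heuristic of treating the first path component as a registry host when it contains a dot, a port, or is localhost:

package main

import (
	"fmt"
	"strings"
)

// registryOf returns the registry host of an image reference, or "" when the
// reference is not fully qualified (no explicit registry host).
func registryOf(image string) string {
	first := strings.Split(image, "/")[0]
	if first != image && (strings.ContainsAny(first, ".:") || first == "localhost") {
		return first
	}

	return "" // e.g. "nginx:latest" or "library/nginx" fall back to the implicit registry
}

func main() {
	fmt.Println(registryOf("quay.io/clastix/capsule:v0.3.0")) // quay.io
	fmt.Println(registryOf("nginx:latest"))                   // empty: not fully qualified
}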
@@ -8,9 +8,6 @@ import (
"net/http"

corev1 "k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
@@ -25,24 +22,6 @@ func PriorityClass() capsulewebhook.Handler {
return &priorityClass{}
}

func (h *priorityClass) class(ctx context.Context, c client.Client, name string) (client.Object, error) {
if len(name) == 0 {
return nil, nil
}

obj := &schedulingv1.PriorityClass{}

if err := c.Get(ctx, types.NamespacedName{Name: name}, obj); err != nil {
if errors.IsNotFound(err) {
return nil, nil
}

return nil, err
}

return obj, nil
}

func (h *priorityClass) OnCreate(c client.Client, decoder *admission.Decoder, recorder record.EventRecorder) capsulewebhook.Func {
return func(ctx context.Context, req admission.Request) *admission.Response {
pod := &corev1.Pod{}

@@ -24,13 +24,13 @@ func IsCapsuleUser(ctx context.Context, req admission.Request, clt client.Client
if groupList.Find("system:serviceaccounts:kube-system") {
return false
}
// nolint:nestif
//nolint:nestif
if sets.NewString(req.UserInfo.Groups...).Has("system:serviceaccounts") {
parts := strings.Split(req.UserInfo.Username, ":")

targetNamespace := parts[2]
if len(parts) == 4 {
targetNamespace := parts[2]

if len(targetNamespace) > 0 {
tl := &capsulev1beta2.TenantList{}
if err := clt.List(ctx, tl, client.MatchingFieldsSelector{Selector: fields.OneTermEqualSelector(".status.namespaces", targetNamespace)}); err != nil {
return false

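Editor's note: the guard added in the last hunk matters because service-account usernames have the form system:serviceaccount:&lt;namespace&gt;:&lt;name&gt;, i.e. exactly four colon-separated parts; a request whose user carries the system:serviceaccounts group but whose username has a different shape could previously make parts[2] misbehave or panic with an index out of range. A standalone sketch of the same check:

package main

import (
	"fmt"
	"strings"
)

// serviceAccountNamespace extracts the namespace from a service-account username of
// the form "system:serviceaccount:<namespace>:<name>" and returns "" for anything else,
// mirroring the len(parts) == 4 guard added above.
func serviceAccountNamespace(username string) string {
	parts := strings.Split(username, ":")
	if len(parts) != 4 {
		return ""
	}

	return parts[2]
}

func main() {
	fmt.Println(serviceAccountNamespace("system:serviceaccount:oil-test:default")) // oil-test
	fmt.Println(serviceAccountNamespace("system:kube-proxy"))                      // empty
}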