Mirror of https://github.com/projectcapsule/capsule.git (synced 2026-02-19 20:39:51 +00:00)

Compare commits: v0.2.2...helm-v0.3.

13 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 3991359bfe | |
| | f0fdab015b | |
| | 610a03d0b9 | |
| | 018784564a | |
| | 8e7078ad4f | |
| | 4e5c00fa65 | |
| | d63a9a0ca6 | |
| | 7d1772031c | |
| | fe4954f39e | |
| | 770ad22170 | |
| | ff17c8b99d | |
| | 930f0382d1 | |
| | c059d503d0 | |
@@ -1,5 +1,6 @@
<p align="left">
<img src="https://github.com/clastix/capsule/actions/workflows/ci.yml/badge.svg"/>
<img src="https://img.shields.io/github/license/clastix/capsule"/>
<img src="https://img.shields.io/github/go-mod/go-version/clastix/capsule"/>
<a href="https://github.com/clastix/capsule/releases">
@@ -21,8 +21,8 @@ sources:

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
-version: 0.3.1
+version: 0.3.6

# This is the version number of the application being deployed.
# This version number should be incremented each time you make changes to the application.
-appVersion: 0.2.1
+appVersion: 0.2.2
@@ -66,15 +66,18 @@ Here the values you can override:
| certManager.generateCertificates | bool | `false` | Specifies whether Capsule webhook certificates should be generated using cert-manager |
| customAnnotations | object | `{}` | Additional annotations which will be added to all resources created by Capsule helm chart |
| customLabels | object | `{}` | Additional labels which will be added to all resources created by Capsule helm chart |
| imagePullSecrets | list | `[]` | Configuration for `imagePullSecrets` so that you can use a private image registry. |
| jobs.image.pullPolicy | string | `"IfNotPresent"` | Set the image pull policy of the helm chart job |
| jobs.image.repository | string | `"clastix/kubectl"` | Set the image repository of the helm chart job |
| jobs.image.tag | string | `""` | Set the image tag of the helm chart job |
| mutatingWebhooksTimeoutSeconds | int | `30` | Timeout in seconds for mutating webhooks |
| nodeSelector | object | `{}` | Set the node selector for the Capsule pod |
| podAnnotations | object | `{}` | Annotations to add to the capsule pod. |
| podSecurityContext | object | `{"runAsGroup":1002,"runAsNonRoot":true,"runAsUser":1002,"seccompProfile":{"type":"RuntimeDefault"}}` | Set the securityContext for the Capsule pod |
| podSecurityPolicy.enabled | bool | `false` | Specify if a Pod Security Policy must be created |
| priorityClassName | string | `""` | Set the priority class name of the Capsule pod |
| replicaCount | int | `1` | Set the replica count for the Capsule pod |
| securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true}` | Set the securityContext for the Capsule container |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account. |
| serviceAccount.create | bool | `true` | Specifies whether a service account should be created. |
| serviceAccount.name | string | `"capsule"` | The name of the service account to use. If not set and `serviceAccount.create=true`, a name is generated using the fullname template |
@@ -92,7 +95,6 @@ Here the values you can override:
| manager.image.pullPolicy | string | `"IfNotPresent"` | Set the image pull policy. |
| manager.image.repository | string | `"clastix/capsule"` | Set the image repository of the capsule. |
| manager.image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. |
| manager.imagePullSecrets | list | `[]` | Configuration for `imagePullSecrets` so that you can use a private image registry. |
| manager.kind | string | `"Deployment"` | Set the controller deployment mode as `Deployment` or `DaemonSet`. |
| manager.livenessProbe | object | `{"httpGet":{"path":"/healthz","port":10080}}` | Configure the liveness probe using Deployment probe spec |
| manager.options.capsuleUserGroups | list | `["capsule.clastix.io"]` | Override the Capsule user groups |
@@ -119,8 +121,6 @@ Here the values you can override:
| serviceMonitor.labels | object | `{}` | Assign additional labels according to Prometheus' serviceMonitorSelector matching labels |
| serviceMonitor.matchLabels | object | `{}` | Change matching labels |
| serviceMonitor.namespace | string | `""` | Install the ServiceMonitor into a different Namespace, e.g. the monitoring stack one (default: the release Namespace) |
| serviceMonitor.serviceAccount.name | string | `"capsule"` | ServiceAccount for Metrics RBAC |
| serviceMonitor.serviceAccount.namespace | string | `"capsule-system"` | ServiceAccount Namespace for Metrics RBAC |
| serviceMonitor.targetLabels | list | `[]` | Set targetLabels for the serviceMonitor |

### Webhook Parameters
@@ -29,6 +29,10 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "capsule.serviceAccountName" . }}
+{{- with .Values.podSecurityContext }}
+securityContext:
+{{- toYaml . | nindent 8 }}
+{{- end }}
{{- if .Values.manager.hostNetwork }}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
@@ -84,5 +88,5 @@ spec:
resources:
{{- toYaml .Values.manager.resources | nindent 12 }}
securityContext:
-allowPrivilegeEscalation: false
+{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
@@ -28,6 +28,10 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "capsule.serviceAccountName" . }}
+{{- with .Values.podSecurityContext }}
+securityContext:
+{{- toYaml . | nindent 8 }}
+{{- end }}
{{- if .Values.manager.hostNetwork }}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
@@ -83,5 +87,5 @@ spec:
resources:
{{- toYaml .Values.manager.resources | nindent 12 }}
securityContext:
-allowPrivilegeEscalation: false
+{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
@@ -1,46 +0,0 @@
{{- if .Values.serviceMonitor.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
{{- include "capsule.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.labels }}
{{- toYaml .Values.serviceMonitor.labels | nindent 4 }}
{{- end }}
{{- with .Values.customAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "capsule.fullname" . }}-metrics-role
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - pods
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
{{- include "capsule.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.labels }}
{{- toYaml .Values.serviceMonitor.labels | nindent 4 }}
{{- end }}
name: {{ include "capsule.fullname" . }}-metrics-rolebinding
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: {{ include "capsule.fullname" . }}-metrics-role
subjects:
- kind: ServiceAccount
  name: {{ .Values.serviceMonitor.serviceAccount.name }}
  namespace: {{ .Values.serviceMonitor.serviceAccount.namespace | default .Release.Namespace }}
{{- end }}
@@ -45,5 +45,11 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
+securityContext:
+{{- toYaml .Values.securityContext | nindent 12 }}
serviceAccountName: {{ include "capsule.serviceAccountName" . }}
+{{- with .Values.podSecurityContext }}
+securityContext:
+{{- toYaml . | nindent 8 }}
+{{- end }}
{{- end }}
@@ -47,4 +47,10 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
+securityContext:
+{{- toYaml .Values.securityContext | nindent 12 }}
serviceAccountName: {{ include "capsule.serviceAccountName" . }}
+{{- with .Values.podSecurityContext }}
+securityContext:
+{{- toYaml . | nindent 8 }}
+{{- end }}
@@ -25,9 +25,6 @@ manager:
# -- Overrides the image tag whose default is the chart appVersion.
tag: ''

# -- Configuration for `imagePullSecrets` so that you can use a private image registry.
imagePullSecrets: []

# -- Specifies if the container should be started in hostNetwork mode.
#
# Required for use in some managed Kubernetes clusters (such as AWS EKS) with custom
@@ -68,6 +65,9 @@ manager:
cpu: 200m
memory: 128Mi

# -- Configuration for `imagePullSecrets` so that you can use a private image registry.
imagePullSecrets: []

# -- Annotations to add to the capsule pod.
podAnnotations: {}
# The following annotations guarantee scheduling for critical add-on pods
@@ -77,6 +77,23 @@ podAnnotations: {}
# -- Set the priority class name of the Capsule pod
priorityClassName: '' # system-cluster-critical

# -- Set the securityContext for the Capsule pod
podSecurityContext:
  seccompProfile:
    type: "RuntimeDefault"
  runAsGroup: 1002
  runAsNonRoot: true
  runAsUser: 1002

# -- Set the securityContext for the Capsule container
securityContext:
  capabilities:
    drop:
    - ALL
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true

# -- Set the node selector for the Capsule pod
nodeSelector: {}
# node-role.kubernetes.io/master: ""
@@ -212,11 +229,6 @@ serviceMonitor:
matchLabels: {}
# -- Set targetLabels for the serviceMonitor
targetLabels: []
serviceAccount:
  # -- ServiceAccount for Metrics RBAC
  name: capsule
  # -- ServiceAccount Namespace for Metrics RBAC
  namespace: capsule-system
endpoint:
  # -- Set the scrape interval for the endpoint of the serviceMonitor
  interval: "15s"
@@ -1,4 +1,2 @@
resources:
- monitor.yaml
- role.yaml
- rolebinding.yaml
@@ -1,18 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    control-plane: controller-manager
  name: capsule-metrics-role
  namespace: capsule-system
rules:
- apiGroups:
  - ""
  resources:
  - services
  - endpoints
  - pods
  verbs:
  - get
  - list
  - watch
@@ -1,15 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    control-plane: controller-manager
  name: capsule-metrics-rolebinding
  namespace: system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: capsule-metrics-role
subjects:
- kind: ServiceAccount
  name: capsule
  namespace: capsule-system
@@ -8,6 +8,7 @@ import (
"fmt"

"github.com/hashicorp/go-multierror"
+"github.com/valyala/fasttemplate"
corev1 "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
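The hunks below lean on hashicorp/go-multierror to fan out per-object work and collect every failure instead of aborting the whole sync on the first error. A minimal, standalone sketch of that pattern (the item names and the simulated failure are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-multierror"
)

func main() {
	group := new(multierror.Group)

	// Fan out one goroutine per item; each error is collected rather than
	// short-circuiting the remaining items.
	for _, name := range []string{"raw-secret-1", "raw-secret-2", "raw-secret-3"} {
		name := name

		group.Go(func() error {
			if name == "raw-secret-2" {
				return fmt.Errorf("unable to replicate %s", name)
			}

			return nil
		})
	}

	// Wait returns a *multierror.Error; ErrorOrNil turns an empty
	// aggregate into a plain nil error.
	if err := group.Wait().ErrorOrNil(); err != nil {
		fmt.Println(err)
	}
}
```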
@@ -130,81 +131,114 @@ func (r *Processor) HandleSection(ctx context.Context, tnt capsulev1beta2.Tenant

syncErr := new(multierror.Error)

for nsIndex, item := range spec.NamespacedItems {
keysAndValues := []interface{}{"index", nsIndex, "namespace", item.Namespace}
// A TenantResource is created by a TenantOwner, and potentially, they could point to a resource in a non-owned
// Namespace: this must be blocked by checking if this is the case.
if !allowCrossNamespaceSelection && !tntNamespaces.Has(item.Namespace) {
log.Info("skipping processing of namespacedItem, referring a Namespace that is not part of the given Global", keysAndValues...)

continue
}
// Namespaced Items are relying on selecting resources, rather than specifying a specific name:
// creating it to get used by the client List action.
itemSelector, selectorErr := metav1.LabelSelectorAsSelector(&item.Selector)
if err != nil {
log.Error(selectorErr, "cannot create Selector for namespacedItem", keysAndValues...)

continue
}

objs := unstructured.UnstructuredList{}
objs.SetGroupVersionKind(schema.FromAPIVersionAndKind(item.APIVersion, fmt.Sprintf("%sList", item.Kind)))

if clientErr := r.client.List(ctx, &objs, client.InNamespace(item.Namespace), client.MatchingLabelsSelector{Selector: itemSelector}); clientErr != nil {
log.Error(clientErr, "cannot retrieve object for namespacedItem", keysAndValues...)

syncErr = multierror.Append(syncErr, clientErr)

continue
}

multiErr := new(multierror.Group)
// Iterating over all the retrieved objects from the resource spec to get replicated in all the selected Namespaces:
// in case of error during the create or update function, this will be appended to the list of errors.
for _, o := range objs.Items {
obj := o

multiErr.Go(func() error {
nsItems, nsErr := r.createOrUpdate(ctx, &obj, objLabels, objAnnotations, namespaces)
if nsErr != nil {
log.Error(err, "unable to sync namespacedItems", keysAndValues...)

return nsErr
}

processed.Insert(nsItems...)

return nil
})
}

if objsErr := multiErr.Wait(); objsErr != nil {
syncErr = multierror.Append(syncErr, objsErr)
}
}

codecFactory := serializer.NewCodecFactory(r.client.Scheme())

for rawIndex, item := range spec.RawItems {
obj, keysAndValues := unstructured.Unstructured{}, []interface{}{"index", rawIndex}
for _, ns := range namespaces.Items {
for nsIndex, item := range spec.NamespacedItems {
keysAndValues := []interface{}{"index", nsIndex, "namespace", item.Namespace}
// A TenantResource is created by a TenantOwner, and potentially, they could point to a resource in a non-owned
// Namespace: this must be blocked by checking if this is the case.
if !allowCrossNamespaceSelection && !tntNamespaces.Has(item.Namespace) {
log.Info("skipping processing of namespacedItem, referring a Namespace that is not part of the given Tenant", keysAndValues...)

if _, _, decodeErr := codecFactory.UniversalDeserializer().Decode(item.Raw, nil, &obj); decodeErr != nil {
log.Error(decodeErr, "unable to deserialize rawItem", keysAndValues...)
continue
}
// Namespaced Items are relying on selecting resources, rather than specifying a specific name:
// creating it to get used by the client List action.
itemSelector, selectorErr := metav1.LabelSelectorAsSelector(&item.Selector)
if selectorErr != nil {
log.Error(selectorErr, "cannot create Selector for namespacedItem", keysAndValues...)

syncErr = multierror.Append(syncErr, decodeErr)
syncErr = multierror.Append(syncErr, selectorErr)

continue
continue
}

objs := unstructured.UnstructuredList{}
objs.SetGroupVersionKind(schema.FromAPIVersionAndKind(item.APIVersion, fmt.Sprintf("%sList", item.Kind)))

if clientErr := r.client.List(ctx, &objs, client.InNamespace(item.Namespace), client.MatchingLabelsSelector{Selector: itemSelector}); clientErr != nil {
log.Error(clientErr, "cannot retrieve object for namespacedItem", keysAndValues...)

syncErr = multierror.Append(syncErr, clientErr)

continue
}

multiErr := new(multierror.Group)
// Iterating over all the retrieved objects from the resource spec to get replicated in all the selected Namespaces:
// in case of error during the create or update function, this will be appended to the list of errors.
for _, o := range objs.Items {
obj := o
obj.SetNamespace(ns.Name)

multiErr.Go(func() error {
kv := keysAndValues
kv = append(kv, []interface{}{"resource", fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetNamespace())})

if opErr := r.createOrUpdate(ctx, &obj, objLabels, objAnnotations); opErr != nil {
log.Error(opErr, "unable to sync namespacedItems", kv...)

return opErr
}

log.Info("resource has been replicated", kv...)

replicatedItem := &capsulev1beta2.ObjectReferenceStatus{}
replicatedItem.Name = obj.GetName()
replicatedItem.Kind = obj.GetKind()
replicatedItem.Namespace = ns.Name
replicatedItem.APIVersion = obj.GetAPIVersion()

processed.Insert(replicatedItem.String())

return nil
})
}

if objsErr := multiErr.Wait(); objsErr != nil {
syncErr = multierror.Append(syncErr, objsErr)
}
}

syncedRaw, rawErr := r.createOrUpdate(ctx, &obj, objLabels, objAnnotations, namespaces)
if rawErr != nil {
log.Info("unable to sync rawItem", keysAndValues...)
// In case of error processing an item in one of any selected Namespaces, storing it to report it later
// to the upper call to ensure a partial sync that will be fixed by a subsequent reconciliation.
syncErr = multierror.Append(syncErr, rawErr)
} else {
processed.Insert(syncedRaw...)
for rawIndex, item := range spec.RawItems {
template := string(item.Raw)

t := fasttemplate.New(template, "{{ ", " }}")

tmplString := t.ExecuteString(map[string]interface{}{
"tenant.name": tnt.Name,
"namespace": ns.Name,
})

obj, keysAndValues := unstructured.Unstructured{}, []interface{}{"index", rawIndex}

if _, _, decodeErr := codecFactory.UniversalDeserializer().Decode([]byte(tmplString), nil, &obj); decodeErr != nil {
log.Error(decodeErr, "unable to deserialize rawItem", keysAndValues...)

syncErr = multierror.Append(syncErr, decodeErr)

continue
}

obj.SetNamespace(ns.Name)

if rawErr := r.createOrUpdate(ctx, &obj, objLabels, objAnnotations); rawErr != nil {
log.Info("unable to sync rawItem", keysAndValues...)
// In case of error processing an item in one of any selected Namespaces, storing it to report it later
// to the upper call to ensure a partial sync that will be fixed by a subsequent reconciliation.
syncErr = multierror.Append(syncErr, rawErr)
} else {
log.Info("resource has been replicated", keysAndValues...)

replicatedItem := &capsulev1beta2.ObjectReferenceStatus{}
replicatedItem.Name = obj.GetName()
replicatedItem.Kind = obj.GetKind()
replicatedItem.Namespace = ns.Name
replicatedItem.APIVersion = obj.GetAPIVersion()

processed.Insert(replicatedItem.String())
}
}
}
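The key behavioural change in HandleSection is that raw items are now rendered once per selected Namespace before being deserialized, so `{{ tenant.name }}` and `{{ namespace }}` placeholders in a raw manifest are expanded for each target Namespace. A minimal sketch of just the fasttemplate step, using the same delimiters as the hunk above (the tenant and Namespace names are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/valyala/fasttemplate"
)

func main() {
	// A raw item as a tenant owner might write it, with placeholders.
	raw := `apiVersion: v1
kind: Secret
metadata:
  name: raw-secret-1
  labels:
    tenant: "{{ tenant.name }}"
    namespace: "{{ namespace }}"
`

	// Same start/end tags as the processor: "{{ " and " }}".
	t := fasttemplate.New(raw, "{{ ", " }}")

	rendered := t.ExecuteString(map[string]interface{}{
		"tenant.name": "solar",
		"namespace":   "solar-dev",
	})

	fmt.Println(rendered)
}
```

The rendered string is then handed to the codec factory's universal deserializer and replicated into the Namespace via createOrUpdate, as the hunk above shows.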
@@ -214,64 +248,26 @@ func (r *Processor) HandleSection(ctx context.Context, tnt capsulev1beta2.Tenant
// createOrUpdate replicates the provided unstructured object to all the provided Namespaces:
// this function mimics the CreateOrUpdate, by retrieving the object to understand if it must be created or updated,
// along adding the additional metadata, if required.
func (r *Processor) createOrUpdate(ctx context.Context, obj *unstructured.Unstructured, labels map[string]string, annotations map[string]string, namespaces corev1.NamespaceList) ([]string, error) {
log := ctrllog.FromContext(ctx)
func (r *Processor) createOrUpdate(ctx context.Context, obj *unstructured.Unstructured, labels map[string]string, annotations map[string]string) (err error) {
actual, desired := &unstructured.Unstructured{}, obj.DeepCopy()

errGroup := new(multierror.Group)
actual.SetAPIVersion(desired.GetAPIVersion())
actual.SetKind(desired.GetKind())
actual.SetNamespace(desired.GetNamespace())
actual.SetName(desired.GetName())

var items []string
_, err = controllerutil.CreateOrUpdate(ctx, r.client, actual, func() error {
UID := actual.GetUID()
rv := actual.GetResourceVersion()

for _, item := range namespaces.Items {
ns := item.GetName()
actual.SetUnstructuredContent(desired.Object)
actual.SetLabels(labels)
actual.SetAnnotations(annotations)
actual.SetResourceVersion(rv)
actual.SetUID(UID)

errGroup.Go(func() (err error) {
actual, desired := obj.DeepCopy(), obj.DeepCopy()
// Using a deferred function to properly log the results, and adding the item to the processed set.
defer func() {
keysAndValues := []interface{}{"resource", fmt.Sprintf("%s/%s", ns, desired.GetName())}
return nil
})

if err != nil {
log.Error(err, "unable to replicate resource", keysAndValues...)

return
}

log.Info("resource has been replicated", keysAndValues...)

replicatedItem := &capsulev1beta2.ObjectReferenceStatus{
Name: obj.GetName(),
}
replicatedItem.Kind = obj.GetKind()
replicatedItem.Namespace = ns
replicatedItem.APIVersion = obj.GetAPIVersion()

items = append(items, replicatedItem.String())
}()

actual.SetNamespace(ns)

_, err = controllerutil.CreateOrUpdate(ctx, r.client, actual, func() error {
UID := actual.GetUID()
rv := actual.GetResourceVersion()

actual.SetUnstructuredContent(desired.Object)
actual.SetNamespace(ns)
actual.SetLabels(labels)
actual.SetAnnotations(annotations)
actual.SetResourceVersion(rv)
actual.SetUID(UID)

return nil
})

return
})
}
// Wait returns *multierror.Error that implements stdlib error:
// the nil check must be performed down here rather than at the caller level to avoid wrong casting.
if err := errGroup.Wait(); err != nil {
return items, err
}

return items, nil
return err
}
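createOrUpdate has been narrowed: it no longer receives a corev1.NamespaceList and no longer fans out internally; it now replicates a single, already-namespaced object and returns a plain error. A rough sketch of the underlying create-or-update pattern with an unstructured object (a hypothetical helper, not the project's actual function; the name and signature are made up):

```go
package sketch

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// replicateInto creates or updates a copy of desired in the given Namespace.
// The mutate callback overwrites the whole object content while preserving the
// server-assigned UID and resourceVersion, then re-applies the replication
// labels and annotations.
func replicateInto(ctx context.Context, c client.Client, desired *unstructured.Unstructured, namespace string, labels, annotations map[string]string) error {
	actual := &unstructured.Unstructured{}
	actual.SetAPIVersion(desired.GetAPIVersion())
	actual.SetKind(desired.GetKind())
	actual.SetNamespace(namespace)
	actual.SetName(desired.GetName())

	_, err := controllerutil.CreateOrUpdate(ctx, c, actual, func() error {
		uid := actual.GetUID()
		rv := actual.GetResourceVersion()

		actual.SetUnstructuredContent(desired.DeepCopy().Object) // replace the full body
		actual.SetNamespace(namespace)
		actual.SetLabels(labels)
		actual.SetAnnotations(annotations)
		actual.SetResourceVersion(rv) // keep what the API server assigned
		actual.SetUID(uid)

		return nil
	})

	return err
}
```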
@@ -104,6 +104,10 @@ var _ = Describe("Creating a TenantResource object", func() {
Name: "raw-secret-1",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{
"{{ tenant.name }}": []byte("Cg=="),
"{{ namespace }}": []byte("Cg=="),
},
},
},
},
@@ -118,6 +122,10 @@ var _ = Describe("Creating a TenantResource object", func() {
Name: "raw-secret-2",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{
"{{ tenant.name }}": []byte("Cg=="),
"{{ namespace }}": []byte("Cg=="),
},
},
},
},
@@ -132,6 +140,10 @@ var _ = Describe("Creating a TenantResource object", func() {
Name: "raw-secret-3",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{
"{{ tenant.name }}": []byte("Cg=="),
"{{ namespace }}": []byte("Cg=="),
},
},
},
},
@@ -220,6 +232,16 @@ var _ = Describe("Creating a TenantResource object", func() {
return secrets.Items
}, defaultTimeoutInterval, defaultPollInterval).Should(HaveLen(4))
})

By(fmt.Sprintf("ensuring raw items are templated in %s Namespace", ns), func() {
for _, name := range []string{"raw-secret-1", "raw-secret-2", "raw-secret-3"} {
secret := corev1.Secret{}
Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: ns}, &secret)).ToNot(HaveOccurred())

Expect(secret.Data).To(HaveKey(solar.Name))
Expect(secret.Data).To(HaveKey(ns))
}
})
}

By("using a Namespace selector", func() {
go.mod (+2)

@@ -59,6 +59,8 @@ require (
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
+github.com/valyala/bytebufferpool v1.0.0 // indirect
+github.com/valyala/fasttemplate v1.2.2 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 // indirect
go.sum (+4)

@@ -482,6 +482,10 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
+github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
hack/local-test-with-kind.sh (new executable file, 145 lines)
@@ -0,0 +1,145 @@
#!/usr/bin/env bash

# This script tests Capsule with kind.
# Useful to run before opening a pull request.

USER=alice
TENANT=oil
GROUP=capsule.clastix.io
KIND_CLUSTER_NAME=capsule-local-test

function error_action() {
  cleanup_action
  exit 1
}

function cleanup_action() {
  kind delete cluster --name=${KIND_CLUSTER_NAME}
  rm -f ./tenant-test.yaml
  rm -f ${USER}-${TENANT}.crt
  rm -f ${USER}-${TENANT}.key
  rm -f ${USER}-${TENANT}.kubeconfig
}

function check_command() {
  local command=$1

  if ! command -v $command &> /dev/null; then
    echo "Error: ${command} not found"
    exit 1
  fi
}

check_command kind
check_command kubectl

### Prepare Kind cluster

echo `date`": INFO: Create Kind cluster"
error_create_kind=$(kind create cluster --name=${KIND_CLUSTER_NAME} 2>&1)
if [ $? -ne 0 ]; then
  echo `date`": $error_create_kind"
  exit 1
fi

echo `date`": INFO: Waiting for the Kind cluster to be ready (up to 30 seconds)"
counter=0
while true
do
  if [ $counter == 30 ]; then
    echo `date`": ERROR: Kind cluster not ready for too long"
    error_action
  fi

  kubectl get nodes | grep " Ready " &>/dev/null
  if [ $? == 0 ]; then
    break
  fi

  ((counter++))
  sleep 1
done

echo `date`": INFO: Kind cluster ready"

### Install the Capsule Helm chart into Kind

echo `date`": INFO: Install the Capsule Helm chart"
error_install_helm=$(helm install capsule ./charts/capsule/ -n capsule-system --create-namespace 2>&1)
if [ $? -ne 0 ]; then
  echo `date`": $error_install_helm"
  exit 1
fi

echo `date`": INFO: Waiting for the Capsule pod to be ready (up to 30 seconds)"
counter=0
while true
do
  if [ $counter == 30 ]; then
    echo `date`": ERROR: Capsule pod not ready for too long"
    error_action
  fi

  kubectl get pod -n capsule-system | grep " Running " &>/dev/null
  if [ $? == 0 ]; then
    break
  fi

  ((counter++))
  sleep 1
done
sleep 5

echo `date`": INFO: Capsule ready"

### Tests

echo `date`": INFO: Create tenant"
cat >>./tenant-test.yaml<<EOF
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
  name: ${TENANT}
spec:
  owners:
  - name: ${USER}
    kind: User
EOF

error_create_tenant=$(kubectl create -f ./tenant-test.yaml 2>&1)
if [ $? -ne 0 ]; then
  echo `date`": $error_create_tenant"
  error_action
fi

echo `date`": INFO: Check the tenant exists"
error_check_tenant=$(kubectl get tenant ${TENANT} 2>&1)
if [ $? -ne 0 ]; then
  echo `date`": ERROR: $error_check_tenant"
  error_action
fi

echo `date`": INFO: Create user ${USER} for tenant ${TENANT}"
error_create_user=$(./hack/create-user.sh ${USER} ${TENANT} 2>&1)
if [ $? -ne 0 ]; then
  echo `date`": ERROR: $error_create_user"
  error_action
fi

echo `date`": INFO: Create a namespace as the tenant user"
error_create_namespace=$(kubectl --kubeconfig=${USER}-${TENANT}.kubeconfig create ns ${TENANT}-test 2>&1)
if [ $? -ne 0 ]; then
  echo `date`": ERROR: $error_create_namespace"
  error_action
fi

echo `date`": INFO: Check the namespace exists in the tenant"
error_tenant=$(kubectl get tenant ${TENANT} -o yaml | grep namespaces -A1 | grep ${TENANT}-test 2>&1)
if [ $? -ne 0 ]; then
  echo `date`": ERROR: $error_tenant"
  error_action
fi

echo `date`": INFO: All ok"

cleanup_action