Compare commits


13 Commits

Author SHA1 Message Date
Sagar Jadhav
3991359bfe chore(helm): bump up the version
Signed-off-by: Sagar Jadhav <sagarj2@vmware.com>
2023-03-02 11:00:22 +01:00
r3drun3
f0fdab015b docs(readme): add ci status badge 2023-02-26 14:49:22 +01:00
Zemtsov Vladimir
610a03d0b9 fix(helm): move imagePullSecrets to root values
Signed-off-by: Zemtsov Vladimir <zvlb>
2023-02-16 11:08:23 +01:00
Dario Tranchitella
018784564a test(e2e): template support for rawitems 2023-02-16 09:20:42 +01:00
Dario Tranchitella
8e7078ad4f feat: template support for rawitems
Allowed template values:
- `{{ tenant.name }}` for the Tenant name managing the Namespace
- `{{ namespace }}` for the Namespace where the resource is replicated
2023-02-16 09:20:42 +01:00
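As a rough sketch (not part of this changeset), a TenantResource using the new placeholders could look like the following; the resource and Namespace names are illustrative, and the field layout assumes the `capsule.clastix.io/v1beta2` TenantResource API:

```yaml
apiVersion: capsule.clastix.io/v1beta2
kind: TenantResource
metadata:
  name: tenant-info            # illustrative name
  namespace: oil-dev           # a Namespace owned by the Tenant (illustrative)
spec:
  resources:
    - rawItems:
        - apiVersion: v1
          kind: ConfigMap
          metadata:
            name: tenant-metadata
          data:
            # Placeholders are resolved per replicated Namespace:
            tenant: "{{ tenant.name }}"   # name of the Tenant managing the Namespace
            target: "{{ namespace }}"     # Namespace where the resource is replicated
```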
Dario Tranchitella
4e5c00fa65 refactor: optimizing processing of tenant resources per namespace 2023-02-16 09:20:42 +01:00
Dario Tranchitella
d63a9a0ca6 fix: creation of namespaced resources backed by cache 2023-02-16 09:20:42 +01:00
Zemtsov Vladimir
7d1772031c feat: add bash script for local-test capsule
Signed-off-by: Zemtsov Vladimir <zemtsov.v@mail366.com>
2023-02-15 17:04:32 +01:00
Zemtsov Vladimir
fe4954f39e feat(helm): add securityContexts to jobs
Signed-off-by: Zemtsov Vladimir <zemtsov.v@mail366.com>
2023-02-14 18:40:03 +01:00
Vladimir
770ad22170 feat(helm): add control for securityContext
Signed-off-by: Zemtsov Vladimir <zemtsov.v@mail366.com>
Co-authored-by: Zemtsov Vladimir <zemtsov.v@mail366.com>
2023-02-10 12:52:41 +01:00
dependabot[bot]
ff17c8b99d build(deps): bump go-restful
Bumps [github.com/emicklei/go-restful](https://github.com/emicklei/go-restful) from 2.15.0+incompatible to 2.16.0+incompatible.
- [Release notes](https://github.com/emicklei/go-restful/releases)
- [Changelog](https://github.com/emicklei/go-restful/blob/v3/CHANGES.md)
- [Commits](https://github.com/emicklei/go-restful/compare/v2.15.0...v2.16.0)

---
updated-dependencies:
- dependency-name: github.com/emicklei/go-restful
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-02-09 18:45:43 +01:00
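To reproduce such a bump by hand, one would typically pin the new version and tidy the module graph; a sketch using standard Go tooling (not a command from this changeset):

```bash
go get github.com/emicklei/go-restful@v2.16.0   # Go resolves the +incompatible suffix itself
go mod tidy                                     # refreshes go.sum and keeps the indirect marker
```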
Dario Tranchitella
930f0382d1 refactor(helm): removing unrequired RBAC for metrics
Providing the required RBAC is not part of Capsule's scope; rather,
it should be addressed by the Prometheus setup.

Reference: https://github.com/clastix/capsule/issues/696#issuecomment-1420611891
2023-02-07 15:31:32 +01:00
Dario Tranchitella
c059d503d0 refactor(kustomize): removing unrequired RBAC for metrics 2023-02-07 15:31:32 +01:00
17 changed files with 340 additions and 219 deletions

View File

@@ -1,5 +1,6 @@
<p align="left">
<img src="https://github.com/clastix/capsule/actions/workflows/ci.yml/badge.svg"/>
<img src="https://img.shields.io/github/license/clastix/capsule"/>
<img src="https://img.shields.io/github/go-mod/go-version/clastix/capsule"/>
<a href="https://github.com/clastix/capsule/releases">

View File

@@ -21,8 +21,8 @@ sources:
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.3.1
version: 0.3.6
# This is the version number of the application being deployed.
# This version number should be incremented each time you make changes to the application.
appVersion: 0.2.1
appVersion: 0.2.2

View File

@@ -66,15 +66,18 @@ Here the values you can override:
| certManager.generateCertificates | bool | `false` | Specifies whether capsule webhooks certificates should be generated using cert-manager |
| customAnnotations | object | `{}` | Additional annotations which will be added to all resources created by Capsule helm chart |
| customLabels | object | `{}` | Additional labels which will be added to all resources created by Capsule helm chart |
| imagePullSecrets | list | `[]` | Configuration for `imagePullSecrets` so that you can use a private image registry. |
| jobs.image.pullPolicy | string | `"IfNotPresent"` | Set the image pull policy of the helm chart job |
| jobs.image.repository | string | `"clastix/kubectl"` | Set the image repository of the helm chart job |
| jobs.image.tag | string | `""` | Set the image tag of the helm chart job |
| mutatingWebhooksTimeoutSeconds | int | `30` | Timeout in seconds for mutating webhooks |
| nodeSelector | object | `{}` | Set the node selector for the Capsule pod |
| podAnnotations | object | `{}` | Annotations to add to the capsule pod. |
| podSecurityContext | object | `{"runAsGroup":1002,"runAsNonRoot":true,"runAsUser":1002,"seccompProfile":{"type":"RuntimeDefault"}}` | Set the securityContext for the Capsule pod |
| podSecurityPolicy.enabled | bool | `false` | Specify if a Pod Security Policy must be created |
| priorityClassName | string | `""` | Set the priority class name of the Capsule pod |
| replicaCount | int | `1` | Set the replica count for capsule pod |
| securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"readOnlyRootFilesystem":true}` | Set the securityContext for the Capsule container |
| serviceAccount.annotations | object | `{}` | Annotations to add to the service account. |
| serviceAccount.create | bool | `true` | Specifies whether a service account should be created. |
| serviceAccount.name | string | `"capsule"` | The name of the service account to use. If not set and `serviceAccount.create=true`, a name is generated using the fullname template |
@@ -92,7 +95,6 @@ Here the values you can override:
| manager.image.pullPolicy | string | `"IfNotPresent"` | Set the image pull policy. |
| manager.image.repository | string | `"clastix/capsule"` | Set the image repository of the capsule. |
| manager.image.tag | string | `""` | Overrides the image tag whose default is the chart appVersion. |
| manager.imagePullSecrets | list | `[]` | Configuration for `imagePullSecrets` so that you can use a private image registry. |
| manager.kind | string | `"Deployment"` | Set the controller deployment mode as `Deployment` or `DaemonSet`. |
| manager.livenessProbe | object | `{"httpGet":{"path":"/healthz","port":10080}}` | Configure the liveness probe using Deployment probe spec |
| manager.options.capsuleUserGroups | list | `["capsule.clastix.io"]` | Override the Capsule user groups |
@@ -119,8 +121,6 @@ Here the values you can override:
| serviceMonitor.labels | object | `{}` | Assign additional labels according to Prometheus' serviceMonitorSelector matching labels |
| serviceMonitor.matchLabels | object | `{}` | Change matching labels |
| serviceMonitor.namespace | string | `""` | Install the ServiceMonitor into a different Namespace, as the monitoring stack one (default: the release one) |
| serviceMonitor.serviceAccount.name | string | `"capsule"` | ServiceAccount for Metrics RBAC |
| serviceMonitor.serviceAccount.namespace | string | `"capsule-system"` | ServiceAccount Namespace for Metrics RBAC |
| serviceMonitor.targetLabels | list | `[]` | Set targetLabels for the serviceMonitor |
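As a usage sketch for the values above (the file name and pull-secret name are illustrative, not chart defaults):

```yaml
# my-values.yaml
imagePullSecrets:
  - name: private-registry-creds   # hypothetical pull secret, now set at the chart root
podSecurityContext:
  runAsNonRoot: true
  runAsUser: 1002
  runAsGroup: 1002
  seccompProfile:
    type: RuntimeDefault
securityContext:
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  capabilities:
    drop: ["ALL"]
```

Applied with something like `helm upgrade --install capsule clastix/capsule -n capsule-system -f my-values.yaml`.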
### Webhook Parameters

View File

@@ -29,6 +29,10 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "capsule.serviceAccountName" . }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.manager.hostNetwork }}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
@@ -84,5 +88,5 @@ spec:
resources:
{{- toYaml .Values.manager.resources | nindent 12 }}
securityContext:
allowPrivilegeEscalation: false
{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}
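With the chart defaults listed earlier, the template now renders the pod- and container-level contexts separately; roughly (a sketch of the expected output, container name assumed):

```yaml
spec:
  securityContext:                  # from .Values.podSecurityContext
    runAsGroup: 1002
    runAsNonRoot: true
    runAsUser: 1002
    seccompProfile:
      type: RuntimeDefault
  containers:
    - name: manager                 # assumed container name
      securityContext:              # from .Values.securityContext
        allowPrivilegeEscalation: false
        readOnlyRootFilesystem: true
        capabilities:
          drop: ["ALL"]
```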

View File

@@ -28,6 +28,10 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "capsule.serviceAccountName" . }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.manager.hostNetwork }}
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
@@ -83,5 +87,5 @@ spec:
resources:
{{- toYaml .Values.manager.resources | nindent 12 }}
securityContext:
allowPrivilegeEscalation: false
{{- toYaml .Values.securityContext | nindent 12 }}
{{- end }}

View File

@@ -1,46 +0,0 @@
{{- if .Values.serviceMonitor.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
{{- include "capsule.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.labels }}
{{- toYaml .Values.serviceMonitor.labels | nindent 4 }}
{{- end }}
{{- with .Values.customAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
name: {{ include "capsule.fullname" . }}-metrics-role
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
{{- include "capsule.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.labels }}
{{- toYaml .Values.serviceMonitor.labels | nindent 4 }}
{{- end }}
name: {{ include "capsule.fullname" . }}-metrics-rolebinding
namespace: {{ .Values.serviceMonitor.namespace | default .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ include "capsule.fullname" . }}-metrics-role
subjects:
- kind: ServiceAccount
name: {{ .Values.serviceMonitor.serviceAccount.name }}
namespace: {{ .Values.serviceMonitor.serviceAccount.namespace | default .Release.Namespace }}
{{- end }}

View File

@@ -45,5 +45,11 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
serviceAccountName: {{ include "capsule.serviceAccountName" . }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@@ -47,4 +47,10 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
serviceAccountName: {{ include "capsule.serviceAccountName" . }}
{{- with .Values.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -25,9 +25,6 @@ manager:
# -- Overrides the image tag whose default is the chart appVersion.
tag: ''
# -- Configuration for `imagePullSecrets` so that you can use a private image registry.
imagePullSecrets: []
# -- Specifies if the container should be started in hostNetwork mode.
#
# Required for use in some managed kubernetes clusters (such as AWS EKS) with custom
@@ -68,6 +65,9 @@ manager:
cpu: 200m
memory: 128Mi
# -- Configuration for `imagePullSecrets` so that you can use a private image registry.
imagePullSecrets: []
# -- Annotations to add to the capsule pod.
podAnnotations: {}
# The following annotations guarantee scheduling for critical add-on pods
@@ -77,6 +77,23 @@ podAnnotations: {}
# -- Set the priority class name of the Capsule pod
priorityClassName: '' # system-cluster-critical
# -- Set the securityContext for the Capsule pod
podSecurityContext:
seccompProfile:
type: "RuntimeDefault"
runAsGroup: 1002
runAsNonRoot: true
runAsUser: 1002
# -- Set the securityContext for the Capsule container
securityContext:
capabilities:
drop:
- ALL
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
# -- Set the node selector for the Capsule pod
nodeSelector: {}
# node-role.kubernetes.io/master: ""
@@ -212,11 +229,6 @@ serviceMonitor:
matchLabels: {}
# -- Set targetLabels for the serviceMonitor
targetLabels: []
serviceAccount:
# -- ServiceAccount for Metrics RBAC
name: capsule
# -- ServiceAccount Namespace for Metrics RBAC
namespace: capsule-system
endpoint:
# -- Set the scrape interval for the endpoint of the serviceMonitor
interval: "15s"
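With `serviceMonitor.serviceAccount.*` gone, the scraping RBAC now belongs to the monitoring stack; a minimal sketch mirroring the Role removed from kustomize below, where the ServiceAccount name and Namespace are assumptions about a typical Prometheus install:

```yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: prometheus-capsule-metrics      # illustrative name
  namespace: capsule-system
rules:
  - apiGroups: [""]
    resources: ["services", "endpoints", "pods"]
    verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: prometheus-capsule-metrics
  namespace: capsule-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: prometheus-capsule-metrics
subjects:
  - kind: ServiceAccount
    name: prometheus-k8s                # assumption: the monitoring stack's ServiceAccount
    namespace: monitoring               # assumption
```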

View File

@@ -1,4 +1,2 @@
resources:
- monitor.yaml
- role.yaml
- rolebinding.yaml

View File

@@ -1,18 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
control-plane: controller-manager
name: capsule-metrics-role
namespace: capsule-system
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- pods
verbs:
- get
- list
- watch

View File

@@ -1,15 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
control-plane: controller-manager
name: capsule-metrics-rolebinding
namespace: system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: capsule-metrics-role
subjects:
- kind: ServiceAccount
name: capsule
namespace: capsule-system

View File

@@ -8,6 +8,7 @@ import (
"fmt"
"github.com/hashicorp/go-multierror"
"github.com/valyala/fasttemplate"
corev1 "k8s.io/api/core/v1"
apierr "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -130,81 +131,114 @@ func (r *Processor) HandleSection(ctx context.Context, tnt capsulev1beta2.Tenant
syncErr := new(multierror.Error)
for nsIndex, item := range spec.NamespacedItems {
keysAndValues := []interface{}{"index", nsIndex, "namespace", item.Namespace}
// A TenantResource is created by a TenantOwner, and potentially, they could point to a resource in a non-owned
// Namespace: this must be blocked by checking if this is the case.
if !allowCrossNamespaceSelection && !tntNamespaces.Has(item.Namespace) {
log.Info("skipping processing of namespacedItem, referring a Namespace that is not part of the given Global", keysAndValues...)
continue
}
// Namespaced Items are relying on selecting resources, rather than specifying a specific name:
// creating it to get used by the client List action.
itemSelector, selectorErr := metav1.LabelSelectorAsSelector(&item.Selector)
if err != nil {
log.Error(selectorErr, "cannot create Selector for namespacedItem", keysAndValues...)
continue
}
objs := unstructured.UnstructuredList{}
objs.SetGroupVersionKind(schema.FromAPIVersionAndKind(item.APIVersion, fmt.Sprintf("%sList", item.Kind)))
if clientErr := r.client.List(ctx, &objs, client.InNamespace(item.Namespace), client.MatchingLabelsSelector{Selector: itemSelector}); clientErr != nil {
log.Error(clientErr, "cannot retrieve object for namespacedItem", keysAndValues...)
syncErr = multierror.Append(syncErr, clientErr)
continue
}
multiErr := new(multierror.Group)
// Iterating over all the retrieved objects from the resource spec to get replicated in all the selected Namespaces:
// in case of error during the create or update function, this will be appended to the list of errors.
for _, o := range objs.Items {
obj := o
multiErr.Go(func() error {
nsItems, nsErr := r.createOrUpdate(ctx, &obj, objLabels, objAnnotations, namespaces)
if nsErr != nil {
log.Error(err, "unable to sync namespacedItems", keysAndValues...)
return nsErr
}
processed.Insert(nsItems...)
return nil
})
}
if objsErr := multiErr.Wait(); objsErr != nil {
syncErr = multierror.Append(syncErr, objsErr)
}
}
codecFactory := serializer.NewCodecFactory(r.client.Scheme())
for rawIndex, item := range spec.RawItems {
obj, keysAndValues := unstructured.Unstructured{}, []interface{}{"index", rawIndex}
for _, ns := range namespaces.Items {
for nsIndex, item := range spec.NamespacedItems {
keysAndValues := []interface{}{"index", nsIndex, "namespace", item.Namespace}
// A TenantResource is created by a TenantOwner, and potentially, they could point to a resource in a non-owned
// Namespace: this must be blocked by checking if this is the case.
if !allowCrossNamespaceSelection && !tntNamespaces.Has(item.Namespace) {
log.Info("skipping processing of namespacedItem, referring a Namespace that is not part of the given Tenant", keysAndValues...)
if _, _, decodeErr := codecFactory.UniversalDeserializer().Decode(item.Raw, nil, &obj); decodeErr != nil {
log.Error(decodeErr, "unable to deserialize rawItem", keysAndValues...)
continue
}
// Namespaced Items are relying on selecting resources, rather than specifying a specific name:
// creating it to get used by the client List action.
itemSelector, selectorErr := metav1.LabelSelectorAsSelector(&item.Selector)
if selectorErr != nil {
log.Error(selectorErr, "cannot create Selector for namespacedItem", keysAndValues...)
syncErr = multierror.Append(syncErr, decodeErr)
syncErr = multierror.Append(syncErr, selectorErr)
continue
continue
}
objs := unstructured.UnstructuredList{}
objs.SetGroupVersionKind(schema.FromAPIVersionAndKind(item.APIVersion, fmt.Sprintf("%sList", item.Kind)))
if clientErr := r.client.List(ctx, &objs, client.InNamespace(item.Namespace), client.MatchingLabelsSelector{Selector: itemSelector}); clientErr != nil {
log.Error(clientErr, "cannot retrieve object for namespacedItem", keysAndValues...)
syncErr = multierror.Append(syncErr, clientErr)
continue
}
multiErr := new(multierror.Group)
// Iterating over all the retrieved objects from the resource spec to get replicated in all the selected Namespaces:
// in case of error during the create or update function, this will be appended to the list of errors.
for _, o := range objs.Items {
obj := o
obj.SetNamespace(ns.Name)
multiErr.Go(func() error {
kv := keysAndValues
kv = append(kv, "resource", fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName()))
if opErr := r.createOrUpdate(ctx, &obj, objLabels, objAnnotations); opErr != nil {
log.Error(opErr, "unable to sync namespacedItems", kv...)
return opErr
}
log.Info("resource has been replicated", kv...)
replicatedItem := &capsulev1beta2.ObjectReferenceStatus{}
replicatedItem.Name = obj.GetName()
replicatedItem.Kind = obj.GetKind()
replicatedItem.Namespace = ns.Name
replicatedItem.APIVersion = obj.GetAPIVersion()
processed.Insert(replicatedItem.String())
return nil
})
}
if objsErr := multiErr.Wait(); objsErr != nil {
syncErr = multierror.Append(syncErr, objsErr)
}
}
syncedRaw, rawErr := r.createOrUpdate(ctx, &obj, objLabels, objAnnotations, namespaces)
if rawErr != nil {
log.Info("unable to sync rawItem", keysAndValues...)
// In case of error processing an item in any of the selected Namespaces, store it so it can be
// reported later to the caller, ensuring a partial sync that a subsequent reconciliation will fix.
syncErr = multierror.Append(syncErr, rawErr)
} else {
processed.Insert(syncedRaw...)
for rawIndex, item := range spec.RawItems {
template := string(item.Raw)
t := fasttemplate.New(template, "{{ ", " }}")
tmplString := t.ExecuteString(map[string]interface{}{
"tenant.name": tnt.Name,
"namespace": ns.Name,
})
obj, keysAndValues := unstructured.Unstructured{}, []interface{}{"index", rawIndex}
if _, _, decodeErr := codecFactory.UniversalDeserializer().Decode([]byte(tmplString), nil, &obj); decodeErr != nil {
log.Error(decodeErr, "unable to deserialize rawItem", keysAndValues...)
syncErr = multierror.Append(syncErr, decodeErr)
continue
}
obj.SetNamespace(ns.Name)
if rawErr := r.createOrUpdate(ctx, &obj, objLabels, objAnnotations); rawErr != nil {
log.Info("unable to sync rawItem", keysAndValues...)
// In case of error processing an item in any of the selected Namespaces, store it so it can be
// reported later to the caller, ensuring a partial sync that a subsequent reconciliation will fix.
syncErr = multierror.Append(syncErr, rawErr)
} else {
log.Info("resource has been replicated", keysAndValues...)
replicatedItem := &capsulev1beta2.ObjectReferenceStatus{}
replicatedItem.Name = obj.GetName()
replicatedItem.Kind = obj.GetKind()
replicatedItem.Namespace = ns.Name
replicatedItem.APIVersion = obj.GetAPIVersion()
processed.Insert(replicatedItem.String())
}
}
}
@@ -214,64 +248,26 @@ func (r *Processor) HandleSection(ctx context.Context, tnt capsulev1beta2.Tenant
// createOrUpdate replicates the provided unstructured object to all the provided Namespaces:
// this function mimics the CreateOrUpdate, by retrieving the object to understand if it must be created or updated,
// along adding the additional metadata, if required.
func (r *Processor) createOrUpdate(ctx context.Context, obj *unstructured.Unstructured, labels map[string]string, annotations map[string]string, namespaces corev1.NamespaceList) ([]string, error) {
log := ctrllog.FromContext(ctx)
func (r *Processor) createOrUpdate(ctx context.Context, obj *unstructured.Unstructured, labels map[string]string, annotations map[string]string) (err error) {
actual, desired := &unstructured.Unstructured{}, obj.DeepCopy()
errGroup := new(multierror.Group)
actual.SetAPIVersion(desired.GetAPIVersion())
actual.SetKind(desired.GetKind())
actual.SetNamespace(desired.GetNamespace())
actual.SetName(desired.GetName())
var items []string
_, err = controllerutil.CreateOrUpdate(ctx, r.client, actual, func() error {
UID := actual.GetUID()
rv := actual.GetResourceVersion()
for _, item := range namespaces.Items {
ns := item.GetName()
actual.SetUnstructuredContent(desired.Object)
actual.SetLabels(labels)
actual.SetAnnotations(annotations)
actual.SetResourceVersion(rv)
actual.SetUID(UID)
errGroup.Go(func() (err error) {
actual, desired := obj.DeepCopy(), obj.DeepCopy()
// Using a deferred function to properly log the results, and adding the item to the processed set.
defer func() {
keysAndValues := []interface{}{"resource", fmt.Sprintf("%s/%s", ns, desired.GetName())}
return nil
})
if err != nil {
log.Error(err, "unable to replicate resource", keysAndValues...)
return
}
log.Info("resource has been replicated", keysAndValues...)
replicatedItem := &capsulev1beta2.ObjectReferenceStatus{
Name: obj.GetName(),
}
replicatedItem.Kind = obj.GetKind()
replicatedItem.Namespace = ns
replicatedItem.APIVersion = obj.GetAPIVersion()
items = append(items, replicatedItem.String())
}()
actual.SetNamespace(ns)
_, err = controllerutil.CreateOrUpdate(ctx, r.client, actual, func() error {
UID := actual.GetUID()
rv := actual.GetResourceVersion()
actual.SetUnstructuredContent(desired.Object)
actual.SetNamespace(ns)
actual.SetLabels(labels)
actual.SetAnnotations(annotations)
actual.SetResourceVersion(rv)
actual.SetUID(UID)
return nil
})
return
})
}
// Wait returns *multierror.Error that implements stdlib error:
// the nil check must be performed down here rather than at the caller level to avoid wrong casting.
if err := errGroup.Wait(); err != nil {
return items, err
}
return items, nil
return err
}
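The new templating path is just a fasttemplate pass over the raw bytes before decoding; a self-contained sketch of that mechanism (values hardcoded for illustration, not the controller code itself):

```go
package main

import (
	"fmt"

	"github.com/valyala/fasttemplate"
)

func main() {
	// A rawItem fragment as it might appear in a TenantResource spec.
	raw := `metadata:
  name: cfg-{{ tenant.name }}
  namespace: {{ namespace }}`

	// Same start/end tags as in the diff above: "{{ " and " }}".
	t := fasttemplate.New(raw, "{{ ", " }}")
	out := t.ExecuteString(map[string]interface{}{
		"tenant.name": "oil",      // Tenant managing the Namespace
		"namespace":   "oil-test", // Namespace being replicated into
	})
	fmt.Println(out) // placeholders replaced, ready for the deserializer
}
```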

View File

@@ -104,6 +104,10 @@ var _ = Describe("Creating a TenantResource object", func() {
Name: "raw-secret-1",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{
"{{ tenant.name }}": []byte("Cg=="),
"{{ namespace }}": []byte("Cg=="),
},
},
},
},
@@ -118,6 +122,10 @@ var _ = Describe("Creating a TenantResource object", func() {
Name: "raw-secret-2",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{
"{{ tenant.name }}": []byte("Cg=="),
"{{ namespace }}": []byte("Cg=="),
},
},
},
},
@@ -132,6 +140,10 @@ var _ = Describe("Creating a TenantResource object", func() {
Name: "raw-secret-3",
},
Type: corev1.SecretTypeOpaque,
Data: map[string][]byte{
"{{ tenant.name }}": []byte("Cg=="),
"{{ namespace }}": []byte("Cg=="),
},
},
},
},
@@ -220,6 +232,16 @@ var _ = Describe("Creating a TenantResource object", func() {
return secrets.Items
}, defaultTimeoutInterval, defaultPollInterval).Should(HaveLen(4))
})
By(fmt.Sprintf("ensuring raw items are templated in %s Namespace", ns), func() {
for _, name := range []string{"raw-secret-1", "raw-secret-2", "raw-secret-3"} {
secret := corev1.Secret{}
Expect(k8sClient.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: ns}, &secret)).ToNot(HaveOccurred())
Expect(secret.Data).To(HaveKey(solar.Name))
Expect(secret.Data).To(HaveKey(ns))
}
})
}
By("using a Namespace selector", func() {

go.mod (2 changes)
View File

@@ -59,6 +59,8 @@ require (
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/valyala/fasttemplate v1.2.2 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 // indirect

go.sum (4 changes)
View File

@@ -482,6 +482,10 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=

hack/local-test-with-kind.sh (new executable file, 145 lines)
View File

@@ -0,0 +1,145 @@
#!/usr/bin/env bash
# This script tests Capsule with kind.
# Useful to run before opening a pull request.
USER=alice
TENANT=oil
GROUP=capsule.clastix.io
KIND_CLUSTER_NAME=capsule-local-test
function error_action() {
cleanup_action
exit 1
}
function cleanup_action() {
kind delete cluster --name=${KIND_CLUSTER_NAME}
rm -f ./tenant-test.yaml
rm -f ${USER}-${TENANT}.crt
rm -f ${USER}-${TENANT}.key
rm -f ${USER}-${TENANT}.kubeconfig
}
function check_command() {
local command=$1
if ! command -v $command &> /dev/null; then
echo "Error: ${command} not found"
exit 1
fi
}
check_command kind
check_command kubectl
### Prepare Kind cluster
echo `date`": INFO: Create Kind Cluster"
error_create_kind=$(kind create cluster --name=${KIND_CLUSTER_NAME} 2>&1)
if [ $? -ne 0 ]; then
echo `date`": $error_create_kind"
exit 1
fi
echo `date`": INFO: Waiting for the Kind cluster to be ready (30 seconds max)"
counter=0
while true
do
if [ $counter == 30 ]; then
echo `date`": ERROR: Kind cluster did not become ready in time"
error_action
fi
kubectl get nodes | grep " Ready " &>/dev/null
if [ $? == 0 ]; then
break
fi
((counter++))
sleep 1
done
echo `date`": INFO: Kind cluster ready"
### Install helm capsule to Kind
echo `date`": INFO: Install helm capsule"
error_install_helm=$(helm install capsule ./charts/capsule/ -n capsule-system --create-namespace 2>&1)
if [ $? -ne 0 ]; then
echo `date`": $error_install_helm"
exit 1
fi
echo `date`": INFO: Waiting for the Capsule pod to be ready (30 seconds max)"
counter=0
while true
do
if [ $counter == 30 ]; then
echo `date`": ERROR: Capsule pod did not become ready in time"
error_action
fi
kubectl get pod -n capsule-system | grep " Running " &>/dev/null
if [ $? == 0 ]; then
break
fi
((counter++))
sleep 1
done
sleep 5
echo `date`": INFO: Capsule ready"
### Tests
echo `date`": INFO: Create tenant"
cat >>./tenant-test.yaml<<EOF
apiVersion: capsule.clastix.io/v1beta2
kind: Tenant
metadata:
name: ${TENANT}
spec:
owners:
- name: ${USER}
kind: User
EOF
error_create_tenant=$(kubectl create -f ./tenant-test.yaml 2>&1)
if [ $? -ne 0 ]; then
echo `date`": $error_create_tenant"
error_action
fi
echo `date`": INFO: Check tenant exists"
error_check_tenant=$(kubectl get tenant ${TENANT} 2>&1)
if [ $? -ne 0 ]; then
echo `date`": ERROR: $error_check_tenant"
error_action
fi
echo `date`": INFO: Create user ${USER} for tenant ${TENANT}"
error_create_user=$(./hack/create-user.sh ${USER} ${TENANT} 2>&1)
if [ $? -ne 0 ]; then
echo `date`": ERROR: $error_create_user"
error_action
fi
echo `date`": INFO: Create namespace as the tenant user"
error_create_namespace=$(kubectl --kubeconfig=${USER}-${TENANT}.kubeconfig create ns ${TENANT}-test 2>&1)
if [ $? -ne 0 ]; then
echo `date`": ERROR: $error_create_namespace"
error_action
fi
echo `date`": INFO: Check namespace exists in tenant"
error_tenant=$(kubectl get tenant ${TENANT} -o yaml | grep namespaces -A1 | grep ${TENANT}-test 2>&1)
if [ $? -ne 0 ]; then
echo `date`": ERROR: $error_tenant"
error_action
fi
echo `date`": INFO: All ok"
cleanup_action
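A usage sketch, assuming kind, kubectl, and helm are on the PATH (the script itself only checks for kind and kubectl) and that it is run from the repository root:

```bash
./hack/local-test-with-kind.sh
# On success it prints "INFO: All ok" and deletes the kind cluster itself;
# most failure paths run the same cleanup via error_action and exit non-zero.
```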